python_code | repo_name | file_path
---|---|---|
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* MSI DIGIVOX mini III remote controller keytable
*
* Copyright (C) 2013 Antti Palosaari <[email protected]>
*/
#include <media/rc-map.h>
#include <linux/module.h>
/*
* Derived from MSI DIGIVOX mini III remote (rc-msi-digivox-iii.c)
*
* Differences between these remotes are:
*
* 1) scancode 0x61d601 is mapped to different button:
* MSI DIGIVOX mini III "Source" = KEY_VIDEO
* Reddo "EPG" = KEY_EPG
*
* 2) Reddo remote has fewer buttons. Missing buttons are: colored buttons,
* navigation buttons and main power button.
*/
static struct rc_map_table reddo[] = {
{ 0x61d601, KEY_EPG }, /* EPG */
{ 0x61d602, KEY_NUMERIC_3 },
{ 0x61d604, KEY_NUMERIC_1 },
{ 0x61d605, KEY_NUMERIC_5 },
{ 0x61d606, KEY_NUMERIC_6 },
{ 0x61d607, KEY_CHANNELDOWN }, /* CH- */
{ 0x61d608, KEY_NUMERIC_2 },
{ 0x61d609, KEY_CHANNELUP }, /* CH+ */
{ 0x61d60a, KEY_NUMERIC_9 },
{ 0x61d60b, KEY_ZOOM }, /* Zoom */
{ 0x61d60c, KEY_NUMERIC_7 },
{ 0x61d60d, KEY_NUMERIC_8 },
{ 0x61d60e, KEY_VOLUMEUP }, /* Vol+ */
{ 0x61d60f, KEY_NUMERIC_4 },
{ 0x61d610, KEY_ESC }, /* [back up arrow] */
{ 0x61d611, KEY_NUMERIC_0 },
{ 0x61d612, KEY_OK }, /* [enter arrow] */
{ 0x61d613, KEY_VOLUMEDOWN }, /* Vol- */
{ 0x61d614, KEY_RECORD }, /* Rec */
{ 0x61d615, KEY_STOP }, /* Stop */
{ 0x61d616, KEY_PLAY }, /* Play */
{ 0x61d617, KEY_MUTE }, /* Mute */
{ 0x61d643, KEY_POWER2 }, /* [red power button] */
};
static struct rc_map_list reddo_map = {
.map = {
.scan = reddo,
.size = ARRAY_SIZE(reddo),
.rc_proto = RC_PROTO_NECX,
.name = RC_MAP_REDDO,
}
};
static int __init init_rc_map_reddo(void)
{
return rc_map_register(&reddo_map);
}
static void __exit exit_rc_map_reddo(void)
{
rc_map_unregister(&reddo_map);
}
module_init(init_rc_map_reddo)
module_exit(exit_rc_map_reddo)
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Antti Palosaari <[email protected]>");
| linux-master | drivers/media/rc/keymaps/rc-reddo.c |
// SPDX-License-Identifier: GPL-2.0+
// Keytable for Xbox DVD remote
// Copyright (c) 2018 by Benjamin Valentin <[email protected]>
#include <media/rc-map.h>
#include <linux/module.h>
/* based on lircd.conf.xbox */
static struct rc_map_table xbox_dvd[] = {
{0xa0b, KEY_OK},
{0xaa6, KEY_UP},
{0xaa7, KEY_DOWN},
{0xaa8, KEY_RIGHT},
{0xaa9, KEY_LEFT},
{0xac3, KEY_INFO},
{0xac6, KEY_NUMERIC_9},
{0xac7, KEY_NUMERIC_8},
{0xac8, KEY_NUMERIC_7},
{0xac9, KEY_NUMERIC_6},
{0xaca, KEY_NUMERIC_5},
{0xacb, KEY_NUMERIC_4},
{0xacc, KEY_NUMERIC_3},
{0xacd, KEY_NUMERIC_2},
{0xace, KEY_NUMERIC_1},
{0xacf, KEY_NUMERIC_0},
{0xad5, KEY_ANGLE},
{0xad8, KEY_BACK},
{0xadd, KEY_PREVIOUSSONG},
{0xadf, KEY_NEXTSONG},
{0xae0, KEY_STOP},
{0xae2, KEY_REWIND},
{0xae3, KEY_FASTFORWARD},
{0xae5, KEY_TITLE},
{0xae6, KEY_PAUSE},
{0xaea, KEY_PLAY},
{0xaf7, KEY_MENU},
};
static struct rc_map_list xbox_dvd_map = {
.map = {
.scan = xbox_dvd,
.size = ARRAY_SIZE(xbox_dvd),
.rc_proto = RC_PROTO_XBOX_DVD,
.name = RC_MAP_XBOX_DVD,
}
};
static int __init init_rc_map(void)
{
return rc_map_register(&xbox_dvd_map);
}
static void __exit exit_rc_map(void)
{
rc_map_unregister(&xbox_dvd_map);
}
module_init(init_rc_map)
module_exit(exit_rc_map)
MODULE_LICENSE("GPL");
| linux-master | drivers/media/rc/keymaps/rc-xbox-dvd.c |
// SPDX-License-Identifier: GPL-2.0+
// pctv-sedna.h - Keytable for pctv_sedna Remote Controller
//
// keymap imported from ir-keymaps.c
//
// Copyright (c) 2010 by Mauro Carvalho Chehab
#include <media/rc-map.h>
#include <linux/module.h>
/* Mapping for the 28 key remote control as seen at
http://www.sednacomputer.com/photo/cardbus-tv.jpg
Pavel Mihaylov <[email protected]>
Also for the remote bundled with Kozumi KTV-01C card */
static struct rc_map_table pctv_sedna[] = {
{ 0x00, KEY_NUMERIC_0 },
{ 0x01, KEY_NUMERIC_1 },
{ 0x02, KEY_NUMERIC_2 },
{ 0x03, KEY_NUMERIC_3 },
{ 0x04, KEY_NUMERIC_4 },
{ 0x05, KEY_NUMERIC_5 },
{ 0x06, KEY_NUMERIC_6 },
{ 0x07, KEY_NUMERIC_7 },
{ 0x08, KEY_NUMERIC_8 },
{ 0x09, KEY_NUMERIC_9 },
{ 0x0a, KEY_AGAIN }, /* Recall */
{ 0x0b, KEY_CHANNELUP },
{ 0x0c, KEY_VOLUMEUP },
{ 0x0d, KEY_MODE }, /* Stereo */
{ 0x0e, KEY_STOP },
{ 0x0f, KEY_PREVIOUSSONG },
{ 0x10, KEY_ZOOM },
{ 0x11, KEY_VIDEO }, /* Source */
{ 0x12, KEY_POWER },
{ 0x13, KEY_MUTE },
{ 0x15, KEY_CHANNELDOWN },
{ 0x18, KEY_VOLUMEDOWN },
{ 0x19, KEY_CAMERA }, /* Snapshot */
{ 0x1a, KEY_NEXTSONG },
{ 0x1b, KEY_TIME }, /* Time Shift */
{ 0x1c, KEY_RADIO }, /* FM Radio */
{ 0x1d, KEY_RECORD },
{ 0x1e, KEY_PAUSE },
/* additional codes for Kozumi's remote */
{ 0x14, KEY_INFO }, /* OSD */
{ 0x16, KEY_OK }, /* OK */
{ 0x17, KEY_DIGITS }, /* Plus */
{ 0x1f, KEY_PLAY }, /* Play */
};
static struct rc_map_list pctv_sedna_map = {
.map = {
.scan = pctv_sedna,
.size = ARRAY_SIZE(pctv_sedna),
.rc_proto = RC_PROTO_UNKNOWN, /* Legacy IR type */
.name = RC_MAP_PCTV_SEDNA,
}
};
static int __init init_rc_map_pctv_sedna(void)
{
return rc_map_register(&pctv_sedna_map);
}
static void __exit exit_rc_map_pctv_sedna(void)
{
rc_map_unregister(&pctv_sedna_map);
}
module_init(init_rc_map_pctv_sedna)
module_exit(exit_rc_map_pctv_sedna)
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Mauro Carvalho Chehab");
| linux-master | drivers/media/rc/keymaps/rc-pctv-sedna.c |
// SPDX-License-Identifier: GPL-2.0+
// flydvb.h - Keytable for flydvb Remote Controller
//
// keymap imported from ir-keymaps.c
//
// Copyright (c) 2010 by Mauro Carvalho Chehab
#include <media/rc-map.h>
#include <linux/module.h>
static struct rc_map_table flydvb[] = {
{ 0x01, KEY_ZOOM }, /* Full Screen */
{ 0x00, KEY_POWER }, /* Power */
{ 0x03, KEY_NUMERIC_1 },
{ 0x04, KEY_NUMERIC_2 },
{ 0x05, KEY_NUMERIC_3 },
{ 0x07, KEY_NUMERIC_4 },
{ 0x08, KEY_NUMERIC_5 },
{ 0x09, KEY_NUMERIC_6 },
{ 0x0b, KEY_NUMERIC_7 },
{ 0x0c, KEY_NUMERIC_8 },
{ 0x0d, KEY_NUMERIC_9 },
{ 0x06, KEY_AGAIN }, /* Recall */
{ 0x0f, KEY_NUMERIC_0 },
{ 0x10, KEY_MUTE }, /* Mute */
{ 0x02, KEY_RADIO }, /* TV/Radio */
{ 0x1b, KEY_LANGUAGE }, /* SAP (Second Audio Program) */
{ 0x14, KEY_VOLUMEUP }, /* VOL+ */
{ 0x17, KEY_VOLUMEDOWN }, /* VOL- */
{ 0x12, KEY_CHANNELUP }, /* CH+ */
{ 0x13, KEY_CHANNELDOWN }, /* CH- */
{ 0x1d, KEY_ENTER }, /* Enter */
{ 0x1a, KEY_TV2 }, /* PIP */
{ 0x18, KEY_VIDEO }, /* Source */
{ 0x1e, KEY_RECORD }, /* Record/Pause */
{ 0x15, KEY_ANGLE }, /* Swap (no label on key) */
{ 0x1c, KEY_PAUSE }, /* Timeshift/Pause */
{ 0x19, KEY_BACK }, /* Rewind << */
{ 0x0a, KEY_PLAYPAUSE }, /* Play/Pause */
{ 0x1f, KEY_FORWARD }, /* Forward >> */
{ 0x16, KEY_PREVIOUS }, /* Back |<< */
{ 0x11, KEY_STOP }, /* Stop */
{ 0x0e, KEY_NEXT }, /* End >>| */
};
static struct rc_map_list flydvb_map = {
.map = {
.scan = flydvb,
.size = ARRAY_SIZE(flydvb),
.rc_proto = RC_PROTO_UNKNOWN, /* Legacy IR type */
.name = RC_MAP_FLYDVB,
}
};
static int __init init_rc_map_flydvb(void)
{
return rc_map_register(&flydvb_map);
}
static void __exit exit_rc_map_flydvb(void)
{
rc_map_unregister(&flydvb_map);
}
module_init(init_rc_map_flydvb)
module_exit(exit_rc_map_flydvb)
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Mauro Carvalho Chehab");
| linux-master | drivers/media/rc/keymaps/rc-flydvb.c |
// SPDX-License-Identifier: GPL-2.0+
//
// Copyright (C) 2019 Christian Hewitt <[email protected]>
#include <media/rc-map.h>
#include <linux/module.h>
//
// Keytable for the Tronsmart Vega S9x remote control
//
static struct rc_map_table vega_s9x[] = {
{ 0x18, KEY_POWER },
{ 0x17, KEY_MUTE }, // mouse
{ 0x46, KEY_UP },
{ 0x47, KEY_LEFT },
{ 0x55, KEY_OK },
{ 0x15, KEY_RIGHT },
{ 0x16, KEY_DOWN },
{ 0x06, KEY_HOME },
{ 0x42, KEY_PLAYPAUSE},
{ 0x40, KEY_BACK },
{ 0x14, KEY_VOLUMEDOWN },
{ 0x04, KEY_MENU },
{ 0x10, KEY_VOLUMEUP },
};
static struct rc_map_list vega_s9x_map = {
.map = {
.scan = vega_s9x,
.size = ARRAY_SIZE(vega_s9x),
.rc_proto = RC_PROTO_NEC,
.name = RC_MAP_VEGA_S9X,
}
};
static int __init init_rc_map_vega_s9x(void)
{
return rc_map_register(&vega_s9x_map);
}
static void __exit exit_rc_map_vega_s9x(void)
{
rc_map_unregister(&vega_s9x_map);
}
module_init(init_rc_map_vega_s9x)
module_exit(exit_rc_map_vega_s9x)
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Christian Hewitt <[email protected]");
| linux-master | drivers/media/rc/keymaps/rc-vega-s9x.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2017 Sanechips Technology Co., Ltd.
* Copyright 2017 Linaro Ltd.
*/
#include <linux/module.h>
#include <media/rc-map.h>
static struct rc_map_table zx_irdec_table[] = {
{ 0x01, KEY_NUMERIC_1 },
{ 0x02, KEY_NUMERIC_2 },
{ 0x03, KEY_NUMERIC_3 },
{ 0x04, KEY_NUMERIC_4 },
{ 0x05, KEY_NUMERIC_5 },
{ 0x06, KEY_NUMERIC_6 },
{ 0x07, KEY_NUMERIC_7 },
{ 0x08, KEY_NUMERIC_8 },
{ 0x09, KEY_NUMERIC_9 },
{ 0x31, KEY_NUMERIC_0 },
{ 0x16, KEY_DELETE },
{ 0x0a, KEY_MODE }, /* Input method */
{ 0x0c, KEY_VOLUMEUP },
{ 0x18, KEY_VOLUMEDOWN },
{ 0x0b, KEY_CHANNELUP },
{ 0x15, KEY_CHANNELDOWN },
{ 0x0d, KEY_PAGEUP },
{ 0x13, KEY_PAGEDOWN },
{ 0x46, KEY_FASTFORWARD },
{ 0x43, KEY_REWIND },
{ 0x44, KEY_PLAYPAUSE },
{ 0x45, KEY_STOP },
{ 0x49, KEY_OK },
{ 0x47, KEY_UP },
{ 0x4b, KEY_DOWN },
{ 0x48, KEY_LEFT },
{ 0x4a, KEY_RIGHT },
{ 0x4d, KEY_MENU },
{ 0x56, KEY_APPSELECT }, /* Application */
{ 0x4c, KEY_BACK },
{ 0x1e, KEY_INFO },
{ 0x4e, KEY_F1 },
{ 0x4f, KEY_F2 },
{ 0x50, KEY_F3 },
{ 0x51, KEY_F4 },
{ 0x1c, KEY_AUDIO },
{ 0x12, KEY_MUTE },
{ 0x11, KEY_DOT }, /* Location */
{ 0x1d, KEY_SETUP },
{ 0x40, KEY_POWER },
};
static struct rc_map_list zx_irdec_map = {
.map = {
.scan = zx_irdec_table,
.size = ARRAY_SIZE(zx_irdec_table),
.rc_proto = RC_PROTO_NEC,
.name = RC_MAP_ZX_IRDEC,
}
};
static int __init init_rc_map_zx_irdec(void)
{
return rc_map_register(&zx_irdec_map);
}
static void __exit exit_rc_map_zx_irdec(void)
{
rc_map_unregister(&zx_irdec_map);
}
module_init(init_rc_map_zx_irdec)
module_exit(exit_rc_map_zx_irdec)
MODULE_AUTHOR("Shawn Guo <[email protected]>");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/media/rc/keymaps/rc-zx-irdec.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* ATI X10 RF remote keytable
*
* Copyright (C) 2011 Anssi Hannula <anssi.hannula@iki.fi>
*
* This file is based on the static generic keytable previously found in
* ati_remote.c, which is
* Copyright (c) 2004 Torrey Hoffman <[email protected]>
* Copyright (c) 2002 Vladimir Dergachev
*/
#include <linux/module.h>
#include <media/rc-map.h>
/*
* Intended usage comments below are from vendor-supplied
* Source: ATI REMOTE WONDER™ Installation Guide
* http://www2.ati.com/manuals/remctrl.pdf
*
* Scancodes were in strict left-right, top-bottom order on the
* original ATI Remote Wonder, but were moved on later models.
*
* Keys A-F are intended to be user-programmable.
*/
static struct rc_map_table ati_x10[] = {
/* keyboard - Above the cursor pad */
{ 0x00, KEY_A },
{ 0x01, KEY_B },
{ 0x02, KEY_POWER }, /* Power */
{ 0x03, KEY_TV }, /* TV */
{ 0x04, KEY_DVD }, /* DVD */
{ 0x05, KEY_WWW }, /* WEB */
{ 0x06, KEY_BOOKMARKS }, /* "book": Open Media Library */
{ 0x07, KEY_EDIT }, /* "hand": Toggle left mouse button (grab) */
/* Mouse emulation pad goes here, handled by driver separately */
{ 0x09, KEY_VOLUMEDOWN }, /* VOL + */
{ 0x08, KEY_VOLUMEUP }, /* VOL - */
{ 0x0a, KEY_MUTE }, /* MUTE */
{ 0x0b, KEY_CHANNELUP }, /* CH + */
{ 0x0c, KEY_CHANNELDOWN },/* CH - */
/*
* We could use KEY_NUMERIC_x for these, but the X11 protocol
* has problems with keycodes greater than 255, so avoid those high
* keycodes in default maps.
*/
{ 0x0d, KEY_NUMERIC_1 },
{ 0x0e, KEY_NUMERIC_2 },
{ 0x0f, KEY_NUMERIC_3 },
{ 0x10, KEY_NUMERIC_4 },
{ 0x11, KEY_NUMERIC_5 },
{ 0x12, KEY_NUMERIC_6 },
{ 0x13, KEY_NUMERIC_7 },
{ 0x14, KEY_NUMERIC_8 },
{ 0x15, KEY_NUMERIC_9 },
{ 0x16, KEY_MENU }, /* "menu": DVD root menu */
/* KEY_NUMERIC_STAR? */
{ 0x17, KEY_NUMERIC_0 },
{ 0x18, KEY_SETUP }, /* "check": DVD setup menu */
/* KEY_NUMERIC_POUND? */
/* DVD navigation buttons */
{ 0x19, KEY_C },
{ 0x1a, KEY_UP }, /* up */
{ 0x1b, KEY_D },
{ 0x1c, KEY_PROPS }, /* "timer" Should be Data On Screen */
/* Symbol is "circle nailed to box" */
{ 0x1d, KEY_LEFT }, /* left */
{ 0x1e, KEY_OK }, /* "OK" */
{ 0x1f, KEY_RIGHT }, /* right */
{ 0x20, KEY_SCREEN }, /* "max" (X11 warning: 0x177) */
/* Should be AC View Toggle, but
that's not in <input/input.h>.
KEY_ZOOM (0x174)? */
{ 0x21, KEY_E },
{ 0x22, KEY_DOWN }, /* down */
{ 0x23, KEY_F },
/* Play/stop/pause buttons */
{ 0x24, KEY_REWIND }, /* (<<) Rewind */
{ 0x25, KEY_PLAY }, /* ( >) Play (KEY_PLAYCD?) */
{ 0x26, KEY_FASTFORWARD }, /* (>>) Fast forward */
{ 0x27, KEY_RECORD }, /* ( o) red */
{ 0x28, KEY_STOPCD }, /* ([]) Stop (KEY_STOP is something else!) */
{ 0x29, KEY_PAUSE }, /* ('') Pause (KEY_PAUSECD?) */
/* Extra keys, not on the original ATI remote */
{ 0x2a, KEY_NEXT }, /* (>+) */
{ 0x2b, KEY_PREVIOUS }, /* (<-) */
{ 0x2d, KEY_INFO }, /* PLAYING (X11 warning: 0x166) */
{ 0x2e, KEY_HOME }, /* TOP */
{ 0x2f, KEY_END }, /* END */
{ 0x30, KEY_SELECT }, /* SELECT (X11 warning: 0x161) */
};
static struct rc_map_list ati_x10_map = {
.map = {
.scan = ati_x10,
.size = ARRAY_SIZE(ati_x10),
.rc_proto = RC_PROTO_OTHER,
.name = RC_MAP_ATI_X10,
}
};
static int __init init_rc_map_ati_x10(void)
{
return rc_map_register(&ati_x10_map);
}
static void __exit exit_rc_map_ati_x10(void)
{
rc_map_unregister(&ati_x10_map);
}
module_init(init_rc_map_ati_x10)
module_exit(exit_rc_map_ati_x10)
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Anssi Hannula <[email protected]>");
| linux-master | drivers/media/rc/keymaps/rc-ati-x10.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Digittrade DVB-T USB Stick remote controller keytable
*
* Copyright (C) 2010 Antti Palosaari <[email protected]>
*/
#include <media/rc-map.h>
#include <linux/module.h>
/* Digittrade DVB-T USB Stick remote controller. */
/* Imported from af9015.h.
Initial keytable was from Alain Kalker <[email protected]> */
/* Digittrade DVB-T USB Stick */
static struct rc_map_table digittrade[] = {
{ 0x0000, KEY_NUMERIC_9 },
{ 0x0001, KEY_EPG }, /* EPG */
{ 0x0002, KEY_VOLUMEDOWN }, /* Vol Dn */
{ 0x0003, KEY_TEXT }, /* TELETEXT */
{ 0x0004, KEY_NUMERIC_8 },
{ 0x0005, KEY_MUTE }, /* MUTE */
{ 0x0006, KEY_POWER2 }, /* POWER */
{ 0x0009, KEY_ZOOM }, /* FULLSCREEN */
{ 0x000a, KEY_RECORD }, /* RECORD */
{ 0x000d, KEY_SUBTITLE }, /* SUBTITLE */
{ 0x000e, KEY_STOP }, /* STOP */
{ 0x0010, KEY_OK }, /* RETURN */
{ 0x0011, KEY_NUMERIC_2 },
{ 0x0012, KEY_NUMERIC_4 },
{ 0x0015, KEY_NUMERIC_3 },
{ 0x0016, KEY_NUMERIC_5 },
{ 0x0017, KEY_CHANNELDOWN }, /* Ch Dn */
{ 0x0019, KEY_CHANNELUP }, /* CH Up */
{ 0x001a, KEY_PAUSE }, /* PAUSE */
{ 0x001b, KEY_NUMERIC_1 },
{ 0x001d, KEY_AUDIO }, /* DUAL SOUND */
{ 0x001e, KEY_PLAY }, /* PLAY */
{ 0x001f, KEY_CAMERA }, /* SNAPSHOT */
{ 0x0040, KEY_VOLUMEUP }, /* Vol Up */
{ 0x0048, KEY_NUMERIC_7 },
{ 0x004c, KEY_NUMERIC_6 },
{ 0x004d, KEY_PLAYPAUSE }, /* TIMESHIFT */
{ 0x0054, KEY_NUMERIC_0 },
};
static struct rc_map_list digittrade_map = {
.map = {
.scan = digittrade,
.size = ARRAY_SIZE(digittrade),
.rc_proto = RC_PROTO_NEC,
.name = RC_MAP_DIGITTRADE,
}
};
static int __init init_rc_map_digittrade(void)
{
return rc_map_register(&digittrade_map);
}
static void __exit exit_rc_map_digittrade(void)
{
rc_map_unregister(&digittrade_map);
}
module_init(init_rc_map_digittrade)
module_exit(exit_rc_map_digittrade)
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Antti Palosaari <[email protected]>");
| linux-master | drivers/media/rc/keymaps/rc-digittrade.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* MSI DIGIVOX mini III remote controller keytable
*
* Copyright (C) 2010 Antti Palosaari <[email protected]>
*/
#include <media/rc-map.h>
#include <linux/module.h>
/* MSI DIGIVOX mini III */
/* Uses NEC extended 0x61d6. */
/* This remote seems to be same as rc-kworld-315u.c. Anyhow, add new remote
since rc-kworld-315u.c lacks NEC extended address byte. */
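/*
 * Illustrative worked example (not part of the original source): with NEC
 * extended addressing, the 24-bit scancodes below pack the two address bytes
 * and the command byte together, e.g. 0x61d602 means address 0x61 0xd6 with
 * command 0x02, which the table maps to KEY_NUMERIC_3.
 */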
static struct rc_map_table msi_digivox_iii[] = {
{ 0x61d601, KEY_VIDEO }, /* Source */
{ 0x61d602, KEY_NUMERIC_3 },
{ 0x61d603, KEY_POWER }, /* ShutDown */
{ 0x61d604, KEY_NUMERIC_1 },
{ 0x61d605, KEY_NUMERIC_5 },
{ 0x61d606, KEY_NUMERIC_6 },
{ 0x61d607, KEY_CHANNELDOWN }, /* CH- */
{ 0x61d608, KEY_NUMERIC_2 },
{ 0x61d609, KEY_CHANNELUP }, /* CH+ */
{ 0x61d60a, KEY_NUMERIC_9 },
{ 0x61d60b, KEY_ZOOM }, /* Zoom */
{ 0x61d60c, KEY_NUMERIC_7 },
{ 0x61d60d, KEY_NUMERIC_8 },
{ 0x61d60e, KEY_VOLUMEUP }, /* Vol+ */
{ 0x61d60f, KEY_NUMERIC_4 },
{ 0x61d610, KEY_ESC }, /* [back up arrow] */
{ 0x61d611, KEY_NUMERIC_0 },
{ 0x61d612, KEY_OK }, /* [enter arrow] */
{ 0x61d613, KEY_VOLUMEDOWN }, /* Vol- */
{ 0x61d614, KEY_RECORD }, /* Rec */
{ 0x61d615, KEY_STOP }, /* Stop */
{ 0x61d616, KEY_PLAY }, /* Play */
{ 0x61d617, KEY_MUTE }, /* Mute */
{ 0x61d618, KEY_UP },
{ 0x61d619, KEY_DOWN },
{ 0x61d61a, KEY_LEFT },
{ 0x61d61b, KEY_RIGHT },
{ 0x61d61c, KEY_RED },
{ 0x61d61d, KEY_GREEN },
{ 0x61d61e, KEY_YELLOW },
{ 0x61d61f, KEY_BLUE },
{ 0x61d643, KEY_POWER2 }, /* [red power button] */
};
static struct rc_map_list msi_digivox_iii_map = {
.map = {
.scan = msi_digivox_iii,
.size = ARRAY_SIZE(msi_digivox_iii),
.rc_proto = RC_PROTO_NECX,
.name = RC_MAP_MSI_DIGIVOX_III,
}
};
static int __init init_rc_map_msi_digivox_iii(void)
{
return rc_map_register(&msi_digivox_iii_map);
}
static void __exit exit_rc_map_msi_digivox_iii(void)
{
rc_map_unregister(&msi_digivox_iii_map);
}
module_init(init_rc_map_msi_digivox_iii)
module_exit(exit_rc_map_msi_digivox_iii)
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Antti Palosaari <[email protected]>");
| linux-master | drivers/media/rc/keymaps/rc-msi-digivox-iii.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* LeadTek Y04G0051 remote controller keytable
*
* Copyright (C) 2010 Antti Palosaari <[email protected]>
*/
#include <media/rc-map.h>
#include <linux/module.h>
static struct rc_map_table leadtek_y04g0051[] = {
{ 0x0300, KEY_POWER2 },
{ 0x0303, KEY_SCREEN },
{ 0x0304, KEY_RIGHT },
{ 0x0305, KEY_NUMERIC_1 },
{ 0x0306, KEY_NUMERIC_2 },
{ 0x0307, KEY_NUMERIC_3 },
{ 0x0308, KEY_LEFT },
{ 0x0309, KEY_NUMERIC_4 },
{ 0x030a, KEY_NUMERIC_5 },
{ 0x030b, KEY_NUMERIC_6 },
{ 0x030c, KEY_UP },
{ 0x030d, KEY_NUMERIC_7 },
{ 0x030e, KEY_NUMERIC_8 },
{ 0x030f, KEY_NUMERIC_9 },
{ 0x0310, KEY_DOWN },
{ 0x0311, KEY_AGAIN },
{ 0x0312, KEY_NUMERIC_0 },
{ 0x0313, KEY_OK }, /* 1st ok */
{ 0x0314, KEY_MUTE },
{ 0x0316, KEY_OK }, /* 2nd ok */
{ 0x031e, KEY_VIDEO }, /* 2nd video */
{ 0x031b, KEY_AUDIO },
{ 0x031f, KEY_TEXT },
{ 0x0340, KEY_SLEEP },
{ 0x0341, KEY_DOT },
{ 0x0342, KEY_REWIND },
{ 0x0343, KEY_PLAY },
{ 0x0344, KEY_FASTFORWARD },
{ 0x0345, KEY_TIME },
{ 0x0346, KEY_STOP }, /* 2nd stop */
{ 0x0347, KEY_RECORD },
{ 0x0348, KEY_CAMERA },
{ 0x0349, KEY_ESC },
{ 0x034a, KEY_NEW },
{ 0x034b, KEY_RED },
{ 0x034c, KEY_GREEN },
{ 0x034d, KEY_YELLOW },
{ 0x034e, KEY_BLUE },
{ 0x034f, KEY_MENU },
{ 0x0350, KEY_STOP }, /* 1st stop */
{ 0x0351, KEY_CHANNEL },
{ 0x0352, KEY_VIDEO }, /* 1st video */
{ 0x0353, KEY_EPG },
{ 0x0354, KEY_PREVIOUS },
{ 0x0355, KEY_NEXT },
{ 0x0356, KEY_TV },
{ 0x035a, KEY_VOLUMEDOWN },
{ 0x035b, KEY_CHANNELUP },
{ 0x035e, KEY_VOLUMEUP },
{ 0x035f, KEY_CHANNELDOWN },
};
static struct rc_map_list leadtek_y04g0051_map = {
.map = {
.scan = leadtek_y04g0051,
.size = ARRAY_SIZE(leadtek_y04g0051),
.rc_proto = RC_PROTO_NEC,
.name = RC_MAP_LEADTEK_Y04G0051,
}
};
static int __init init_rc_map_leadtek_y04g0051(void)
{
return rc_map_register(&leadtek_y04g0051_map);
}
static void __exit exit_rc_map_leadtek_y04g0051(void)
{
rc_map_unregister(&leadtek_y04g0051_map);
}
module_init(init_rc_map_leadtek_y04g0051)
module_exit(exit_rc_map_leadtek_y04g0051)
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Antti Palosaari <[email protected]>");
| linux-master | drivers/media/rc/keymaps/rc-leadtek-y04g0051.c |
// SPDX-License-Identifier: GPL-2.0+
// cinergy-1400.h - Keytable for cinergy_1400 Remote Controller
//
// keymap imported from ir-keymaps.c
//
// Copyright (c) 2010 by Mauro Carvalho Chehab
#include <media/rc-map.h>
#include <linux/module.h>
/* Cinergy 1400 DVB-T */
static struct rc_map_table cinergy_1400[] = {
{ 0x01, KEY_POWER },
{ 0x02, KEY_NUMERIC_1 },
{ 0x03, KEY_NUMERIC_2 },
{ 0x04, KEY_NUMERIC_3 },
{ 0x05, KEY_NUMERIC_4 },
{ 0x06, KEY_NUMERIC_5 },
{ 0x07, KEY_NUMERIC_6 },
{ 0x08, KEY_NUMERIC_7 },
{ 0x09, KEY_NUMERIC_8 },
{ 0x0a, KEY_NUMERIC_9 },
{ 0x0c, KEY_NUMERIC_0 },
{ 0x0b, KEY_VIDEO },
{ 0x0d, KEY_REFRESH },
{ 0x0e, KEY_SELECT },
{ 0x0f, KEY_EPG },
{ 0x10, KEY_UP },
{ 0x11, KEY_LEFT },
{ 0x12, KEY_OK },
{ 0x13, KEY_RIGHT },
{ 0x14, KEY_DOWN },
{ 0x15, KEY_TEXT },
{ 0x16, KEY_INFO },
{ 0x17, KEY_RED },
{ 0x18, KEY_GREEN },
{ 0x19, KEY_YELLOW },
{ 0x1a, KEY_BLUE },
{ 0x1b, KEY_CHANNELUP },
{ 0x1c, KEY_VOLUMEUP },
{ 0x1d, KEY_MUTE },
{ 0x1e, KEY_VOLUMEDOWN },
{ 0x1f, KEY_CHANNELDOWN },
{ 0x40, KEY_PAUSE },
{ 0x4c, KEY_PLAY },
{ 0x58, KEY_RECORD },
{ 0x54, KEY_PREVIOUS },
{ 0x48, KEY_STOP },
{ 0x5c, KEY_NEXT },
};
static struct rc_map_list cinergy_1400_map = {
.map = {
.scan = cinergy_1400,
.size = ARRAY_SIZE(cinergy_1400),
.rc_proto = RC_PROTO_UNKNOWN, /* Legacy IR type */
.name = RC_MAP_CINERGY_1400,
}
};
static int __init init_rc_map_cinergy_1400(void)
{
return rc_map_register(&cinergy_1400_map);
}
static void __exit exit_rc_map_cinergy_1400(void)
{
rc_map_unregister(&cinergy_1400_map);
}
module_init(init_rc_map_cinergy_1400)
module_exit(exit_rc_map_cinergy_1400)
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Mauro Carvalho Chehab");
| linux-master | drivers/media/rc/keymaps/rc-cinergy-1400.c |
// SPDX-License-Identifier: GPL-2.0+
// powercolor-real-angel.h - Keytable for powercolor_real_angel Remote Controller
//
// keymap imported from ir-keymaps.c
//
// Copyright (c) 2010 by Mauro Carvalho Chehab
#include <media/rc-map.h>
#include <linux/module.h>
/*
* Remote control for Powercolor Real Angel 330
* Daniel Fraga <[email protected]>
*/
static struct rc_map_table powercolor_real_angel[] = {
{ 0x38, KEY_SWITCHVIDEOMODE }, /* switch inputs */
{ 0x0c, KEY_MEDIA }, /* Turn ON/OFF App */
{ 0x00, KEY_NUMERIC_0 },
{ 0x01, KEY_NUMERIC_1 },
{ 0x02, KEY_NUMERIC_2 },
{ 0x03, KEY_NUMERIC_3 },
{ 0x04, KEY_NUMERIC_4 },
{ 0x05, KEY_NUMERIC_5 },
{ 0x06, KEY_NUMERIC_6 },
{ 0x07, KEY_NUMERIC_7 },
{ 0x08, KEY_NUMERIC_8 },
{ 0x09, KEY_NUMERIC_9 },
{ 0x0a, KEY_DIGITS }, /* single, double, triple digit */
{ 0x29, KEY_PREVIOUS }, /* previous channel */
{ 0x12, KEY_BRIGHTNESSUP },
{ 0x13, KEY_BRIGHTNESSDOWN },
{ 0x2b, KEY_MODE }, /* stereo/mono */
{ 0x2c, KEY_TEXT }, /* teletext */
{ 0x20, KEY_CHANNELUP }, /* channel up */
{ 0x21, KEY_CHANNELDOWN }, /* channel down */
{ 0x10, KEY_VOLUMEUP }, /* volume up */
{ 0x11, KEY_VOLUMEDOWN }, /* volume down */
{ 0x0d, KEY_MUTE },
{ 0x1f, KEY_RECORD },
{ 0x17, KEY_PLAY },
{ 0x16, KEY_PAUSE },
{ 0x0b, KEY_STOP },
{ 0x27, KEY_FASTFORWARD },
{ 0x26, KEY_REWIND },
{ 0x1e, KEY_SEARCH }, /* autoscan */
{ 0x0e, KEY_CAMERA }, /* snapshot */
{ 0x2d, KEY_SETUP },
{ 0x0f, KEY_SCREEN }, /* full screen */
{ 0x14, KEY_RADIO }, /* FM radio */
{ 0x25, KEY_POWER }, /* power */
};
static struct rc_map_list powercolor_real_angel_map = {
.map = {
.scan = powercolor_real_angel,
.size = ARRAY_SIZE(powercolor_real_angel),
.rc_proto = RC_PROTO_UNKNOWN, /* Legacy IR type */
.name = RC_MAP_POWERCOLOR_REAL_ANGEL,
}
};
static int __init init_rc_map_powercolor_real_angel(void)
{
return rc_map_register(&powercolor_real_angel_map);
}
static void __exit exit_rc_map_powercolor_real_angel(void)
{
rc_map_unregister(&powercolor_real_angel_map);
}
module_init(init_rc_map_powercolor_real_angel)
module_exit(exit_rc_map_powercolor_real_angel)
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Mauro Carvalho Chehab");
| linux-master | drivers/media/rc/keymaps/rc-powercolor-real-angel.c |
// SPDX-License-Identifier: GPL-2.0+
// msi-tvanywhere-plus.h - Keytable for msi_tvanywhere_plus Remote Controller
//
// keymap imported from ir-keymaps.c
//
// Copyright (c) 2010 by Mauro Carvalho Chehab
#include <media/rc-map.h>
#include <linux/module.h>
/*
Keycodes for remote on the MSI TV@nywhere Plus. The controller IC on the card
is marked "KS003". The controller is I2C at address 0x30, but does not seem
to respond to probes until a read is performed from a valid device.
I don't know why...
Note: This remote may be of similar or identical design to the
Pixelview remote (?). The raw codes and duplicate button codes
appear to be the same.
Henry Wong <[email protected]>
Some changes to formatting and keycodes by Mark Schultz <[email protected]>
*/
static struct rc_map_table msi_tvanywhere_plus[] = {
/* ---- Remote Button Layout ----
POWER SOURCE SCAN MUTE
TV/FM 1 2 3
|> 4 5 6
<| 7 8 9
^^UP 0 + RECALL
vvDN RECORD STOP PLAY
MINIMIZE ZOOM
CH+
VOL- VOL+
CH-
SNAPSHOT MTS
<< FUNC >> RESET
*/
{ 0x01, KEY_NUMERIC_1 }, /* 1 */
{ 0x0b, KEY_NUMERIC_2 }, /* 2 */
{ 0x1b, KEY_NUMERIC_3 }, /* 3 */
{ 0x05, KEY_NUMERIC_4 }, /* 4 */
{ 0x09, KEY_NUMERIC_5 }, /* 5 */
{ 0x15, KEY_NUMERIC_6 }, /* 6 */
{ 0x06, KEY_NUMERIC_7 }, /* 7 */
{ 0x0a, KEY_NUMERIC_8 }, /* 8 */
{ 0x12, KEY_NUMERIC_9 }, /* 9 */
{ 0x02, KEY_NUMERIC_0 }, /* 0 */
{ 0x10, KEY_KPPLUS }, /* + */
{ 0x13, KEY_AGAIN }, /* Recall */
{ 0x1e, KEY_POWER }, /* Power */
{ 0x07, KEY_VIDEO }, /* Source */
{ 0x1c, KEY_SEARCH }, /* Scan */
{ 0x18, KEY_MUTE }, /* Mute */
{ 0x03, KEY_RADIO }, /* TV/FM */
/* The next four keys are duplicates that appear to send the
same IR code as Ch+, Ch-, >>, and << . The raw code assigned
to them is the actual code + 0x20 - they will never be
detected as such unless some way is discovered to distinguish
these buttons from those that have the same code. */
{ 0x3f, KEY_RIGHT }, /* |> and Ch+ */
{ 0x37, KEY_LEFT }, /* <| and Ch- */
{ 0x2c, KEY_UP }, /* ^^Up and >> */
{ 0x24, KEY_DOWN }, /* vvDn and << */
{ 0x00, KEY_RECORD }, /* Record */
{ 0x08, KEY_STOP }, /* Stop */
{ 0x11, KEY_PLAY }, /* Play */
{ 0x0f, KEY_CLOSE }, /* Minimize */
{ 0x19, KEY_ZOOM }, /* Zoom */
{ 0x1a, KEY_CAMERA }, /* Snapshot */
{ 0x0d, KEY_LANGUAGE }, /* MTS */
{ 0x14, KEY_VOLUMEDOWN }, /* Vol- */
{ 0x16, KEY_VOLUMEUP }, /* Vol+ */
{ 0x17, KEY_CHANNELDOWN }, /* Ch- */
{ 0x1f, KEY_CHANNELUP }, /* Ch+ */
{ 0x04, KEY_REWIND }, /* << */
{ 0x0e, KEY_MENU }, /* Function */
{ 0x0c, KEY_FASTFORWARD }, /* >> */
{ 0x1d, KEY_RESTART }, /* Reset */
};
static struct rc_map_list msi_tvanywhere_plus_map = {
.map = {
.scan = msi_tvanywhere_plus,
.size = ARRAY_SIZE(msi_tvanywhere_plus),
.rc_proto = RC_PROTO_UNKNOWN, /* Legacy IR type */
.name = RC_MAP_MSI_TVANYWHERE_PLUS,
}
};
static int __init init_rc_map_msi_tvanywhere_plus(void)
{
return rc_map_register(&msi_tvanywhere_plus_map);
}
static void __exit exit_rc_map_msi_tvanywhere_plus(void)
{
rc_map_unregister(&msi_tvanywhere_plus_map);
}
module_init(init_rc_map_msi_tvanywhere_plus)
module_exit(exit_rc_map_msi_tvanywhere_plus)
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Mauro Carvalho Chehab");
| linux-master | drivers/media/rc/keymaps/rc-msi-tvanywhere-plus.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/* LME2510 remote control
*
* Copyright (C) 2010 Malcolm Priestley ([email protected])
*/
#include <media/rc-map.h>
#include <linux/module.h>
static struct rc_map_table lme2510_rc[] = {
/* Type 1 - 26 buttons */
{ 0xef12ba45, KEY_NUMERIC_0 },
{ 0xef12a05f, KEY_NUMERIC_1 },
{ 0xef12af50, KEY_NUMERIC_2 },
{ 0xef12a25d, KEY_NUMERIC_3 },
{ 0xef12be41, KEY_NUMERIC_4 },
{ 0xef12f50a, KEY_NUMERIC_5 },
{ 0xef12bd42, KEY_NUMERIC_6 },
{ 0xef12b847, KEY_NUMERIC_7 },
{ 0xef12b649, KEY_NUMERIC_8 },
{ 0xef12fa05, KEY_NUMERIC_9 },
{ 0xef12bc43, KEY_POWER },
{ 0xef12b946, KEY_SUBTITLE },
{ 0xef12f906, KEY_PAUSE },
{ 0xef12fc03, KEY_MEDIA_REPEAT},
{ 0xef12fd02, KEY_PAUSE },
{ 0xef12a15e, KEY_VOLUMEUP },
{ 0xef12a35c, KEY_VOLUMEDOWN },
{ 0xef12f609, KEY_CHANNELUP },
{ 0xef12e51a, KEY_CHANNELDOWN },
{ 0xef12e11e, KEY_PLAY },
{ 0xef12e41b, KEY_ZOOM },
{ 0xef12a659, KEY_MUTE },
{ 0xef12a55a, KEY_TV },
{ 0xef12e718, KEY_RECORD },
{ 0xef12f807, KEY_EPG },
{ 0xef12fe01, KEY_STOP },
/* Type 2 - 20 buttons */
{ 0xff40ea15, KEY_NUMERIC_0 },
{ 0xff40f708, KEY_NUMERIC_1 },
{ 0xff40f609, KEY_NUMERIC_2 },
{ 0xff40f50a, KEY_NUMERIC_3 },
{ 0xff40f30c, KEY_NUMERIC_4 },
{ 0xff40f20d, KEY_NUMERIC_5 },
{ 0xff40f10e, KEY_NUMERIC_6 },
{ 0xff40ef10, KEY_NUMERIC_7 },
{ 0xff40ee11, KEY_NUMERIC_8 },
{ 0xff40ed12, KEY_NUMERIC_9 },
{ 0xff40ff00, KEY_POWER },
{ 0xff40fb04, KEY_MEDIA_REPEAT}, /* Recall */
{ 0xff40e51a, KEY_PAUSE }, /* Timeshift */
{ 0xff40fd02, KEY_VOLUMEUP }, /* 2 x -/+ Keys not marked */
{ 0xff40f906, KEY_VOLUMEDOWN }, /* Volume defined as right hand*/
{ 0xff40fe01, KEY_CHANNELUP },
{ 0xff40fa05, KEY_CHANNELDOWN },
{ 0xff40eb14, KEY_ZOOM },
{ 0xff40e718, KEY_RECORD },
{ 0xff40e916, KEY_STOP },
/* Type 3 - 20 buttons */
{ 0xff00e31c, KEY_NUMERIC_0 },
{ 0xff00f807, KEY_NUMERIC_1 },
{ 0xff00ea15, KEY_NUMERIC_2 },
{ 0xff00f609, KEY_NUMERIC_3 },
{ 0xff00e916, KEY_NUMERIC_4 },
{ 0xff00e619, KEY_NUMERIC_5 },
{ 0xff00f20d, KEY_NUMERIC_6 },
{ 0xff00f30c, KEY_NUMERIC_7 },
{ 0xff00e718, KEY_NUMERIC_8 },
{ 0xff00a15e, KEY_NUMERIC_9 },
{ 0xff00ba45, KEY_POWER },
{ 0xff00bb44, KEY_MEDIA_REPEAT}, /* Recall */
{ 0xff00b54a, KEY_PAUSE }, /* Timeshift */
{ 0xff00b847, KEY_VOLUMEUP }, /* 2 x -/+ Keys not marked */
{ 0xff00bc43, KEY_VOLUMEDOWN }, /* Volume defined as right hand*/
{ 0xff00b946, KEY_CHANNELUP },
{ 0xff00bf40, KEY_CHANNELDOWN },
{ 0xff00f708, KEY_ZOOM },
{ 0xff00bd42, KEY_RECORD },
{ 0xff00a55a, KEY_STOP },
};
static struct rc_map_list lme2510_map = {
.map = {
.scan = lme2510_rc,
.size = ARRAY_SIZE(lme2510_rc),
.rc_proto = RC_PROTO_NEC,
.name = RC_MAP_LME2510,
}
};
static int __init init_rc_lme2510_map(void)
{
return rc_map_register(&lme2510_map);
}
static void __exit exit_rc_lme2510_map(void)
{
rc_map_unregister(&lme2510_map);
}
module_init(init_rc_lme2510_map)
module_exit(exit_rc_lme2510_map)
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Malcolm Priestley [email protected]");
| linux-master | drivers/media/rc/keymaps/rc-lme2510.c |
// SPDX-License-Identifier: GPL-2.0+
// proteus-2309.h - Keytable for proteus_2309 Remote Controller
//
// keymap imported from ir-keymaps.c
//
// Copyright (c) 2010 by Mauro Carvalho Chehab
#include <media/rc-map.h>
#include <linux/module.h>
/* Michal Majchrowicz <[email protected]> */
static struct rc_map_table proteus_2309[] = {
/* numeric */
{ 0x00, KEY_NUMERIC_0 },
{ 0x01, KEY_NUMERIC_1 },
{ 0x02, KEY_NUMERIC_2 },
{ 0x03, KEY_NUMERIC_3 },
{ 0x04, KEY_NUMERIC_4 },
{ 0x05, KEY_NUMERIC_5 },
{ 0x06, KEY_NUMERIC_6 },
{ 0x07, KEY_NUMERIC_7 },
{ 0x08, KEY_NUMERIC_8 },
{ 0x09, KEY_NUMERIC_9 },
{ 0x5c, KEY_POWER }, /* power */
{ 0x20, KEY_ZOOM }, /* full screen */
{ 0x0f, KEY_BACKSPACE }, /* recall */
{ 0x1b, KEY_ENTER }, /* mute */
{ 0x41, KEY_RECORD }, /* record */
{ 0x43, KEY_STOP }, /* stop */
{ 0x16, KEY_S },
{ 0x1a, KEY_POWER2 }, /* off */
{ 0x2e, KEY_RED },
{ 0x1f, KEY_CHANNELDOWN }, /* channel - */
{ 0x1c, KEY_CHANNELUP }, /* channel + */
{ 0x10, KEY_VOLUMEDOWN }, /* volume - */
{ 0x1e, KEY_VOLUMEUP }, /* volume + */
{ 0x14, KEY_F1 },
};
static struct rc_map_list proteus_2309_map = {
.map = {
.scan = proteus_2309,
.size = ARRAY_SIZE(proteus_2309),
.rc_proto = RC_PROTO_UNKNOWN, /* Legacy IR type */
.name = RC_MAP_PROTEUS_2309,
}
};
static int __init init_rc_map_proteus_2309(void)
{
return rc_map_register(&proteus_2309_map);
}
static void __exit exit_rc_map_proteus_2309(void)
{
rc_map_unregister(&proteus_2309_map);
}
module_init(init_rc_map_proteus_2309)
module_exit(exit_rc_map_proteus_2309)
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Mauro Carvalho Chehab");
| linux-master | drivers/media/rc/keymaps/rc-proteus-2309.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* ImgTec IR Decoder found in PowerDown Controller.
*
* Copyright 2010-2014 Imagination Technologies Ltd.
*
* This contains core img-ir code for setting up the driver. The two interfaces
* (raw and hardware decode) are handled separately.
*/
#include <linux/clk.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include "img-ir.h"
static irqreturn_t img_ir_isr(int irq, void *dev_id)
{
struct img_ir_priv *priv = dev_id;
u32 irq_status;
spin_lock(&priv->lock);
/* we have to clear irqs before reading */
irq_status = img_ir_read(priv, IMG_IR_IRQ_STATUS);
img_ir_write(priv, IMG_IR_IRQ_CLEAR, irq_status);
/* don't handle valid data irqs if we're only interested in matches */
irq_status &= img_ir_read(priv, IMG_IR_IRQ_ENABLE);
/* hand off edge interrupts to raw decode handler */
if (irq_status & IMG_IR_IRQ_EDGE && img_ir_raw_enabled(&priv->raw))
img_ir_isr_raw(priv, irq_status);
/* hand off hardware match interrupts to hardware decode handler */
if (irq_status & (IMG_IR_IRQ_DATA_MATCH |
IMG_IR_IRQ_DATA_VALID |
IMG_IR_IRQ_DATA2_VALID) &&
img_ir_hw_enabled(&priv->hw))
img_ir_isr_hw(priv, irq_status);
spin_unlock(&priv->lock);
return IRQ_HANDLED;
}
static void img_ir_setup(struct img_ir_priv *priv)
{
/* start off with interrupts disabled */
img_ir_write(priv, IMG_IR_IRQ_ENABLE, 0);
img_ir_setup_raw(priv);
img_ir_setup_hw(priv);
if (!IS_ERR(priv->clk))
clk_prepare_enable(priv->clk);
}
static void img_ir_ident(struct img_ir_priv *priv)
{
u32 core_rev = img_ir_read(priv, IMG_IR_CORE_REV);
dev_info(priv->dev,
"IMG IR Decoder (%d.%d.%d.%d) probed successfully\n",
(core_rev & IMG_IR_DESIGNER) >> IMG_IR_DESIGNER_SHIFT,
(core_rev & IMG_IR_MAJOR_REV) >> IMG_IR_MAJOR_REV_SHIFT,
(core_rev & IMG_IR_MINOR_REV) >> IMG_IR_MINOR_REV_SHIFT,
(core_rev & IMG_IR_MAINT_REV) >> IMG_IR_MAINT_REV_SHIFT);
dev_info(priv->dev, "Modes:%s%s\n",
img_ir_hw_enabled(&priv->hw) ? " hardware" : "",
img_ir_raw_enabled(&priv->raw) ? " raw" : "");
}
static int img_ir_probe(struct platform_device *pdev)
{
struct img_ir_priv *priv;
int irq, error, error2;
/* Get resources from platform device */
irq = platform_get_irq(pdev, 0);
if (irq < 0)
return irq;
/* Private driver data */
priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
platform_set_drvdata(pdev, priv);
priv->dev = &pdev->dev;
spin_lock_init(&priv->lock);
/* Ioremap the registers */
priv->reg_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(priv->reg_base))
return PTR_ERR(priv->reg_base);
/* Get core clock */
priv->clk = devm_clk_get(&pdev->dev, "core");
if (IS_ERR(priv->clk))
dev_warn(&pdev->dev, "cannot get core clock resource\n");
/* Get sys clock */
priv->sys_clk = devm_clk_get(&pdev->dev, "sys");
if (IS_ERR(priv->sys_clk))
dev_warn(&pdev->dev, "cannot get sys clock resource\n");
/*
 * Enable the system clock before the register interface is
 * accessed. The ISR must not run with the sys clock disabled,
 * hence probe exits with an error if the clock cannot be enabled.
 */
if (!IS_ERR(priv->sys_clk)) {
error = clk_prepare_enable(priv->sys_clk);
if (error) {
dev_err(&pdev->dev, "cannot enable sys clock\n");
return error;
}
}
/* Set up raw & hw decoder */
error = img_ir_probe_raw(priv);
error2 = img_ir_probe_hw(priv);
if (error && error2) {
if (error == -ENODEV)
error = error2;
goto err_probe;
}
/* Get the IRQ */
priv->irq = irq;
error = request_irq(priv->irq, img_ir_isr, 0, "img-ir", priv);
if (error) {
dev_err(&pdev->dev, "cannot register IRQ %u\n",
priv->irq);
error = -EIO;
goto err_irq;
}
img_ir_ident(priv);
img_ir_setup(priv);
return 0;
err_irq:
img_ir_remove_hw(priv);
img_ir_remove_raw(priv);
err_probe:
if (!IS_ERR(priv->sys_clk))
clk_disable_unprepare(priv->sys_clk);
return error;
}
static void img_ir_remove(struct platform_device *pdev)
{
struct img_ir_priv *priv = platform_get_drvdata(pdev);
free_irq(priv->irq, priv);
img_ir_remove_hw(priv);
img_ir_remove_raw(priv);
if (!IS_ERR(priv->clk))
clk_disable_unprepare(priv->clk);
if (!IS_ERR(priv->sys_clk))
clk_disable_unprepare(priv->sys_clk);
}
static SIMPLE_DEV_PM_OPS(img_ir_pmops, img_ir_suspend, img_ir_resume);
static const struct of_device_id img_ir_match[] = {
{ .compatible = "img,ir-rev1" },
{}
};
MODULE_DEVICE_TABLE(of, img_ir_match);
static struct platform_driver img_ir_driver = {
.driver = {
.name = "img-ir",
.of_match_table = img_ir_match,
.pm = &img_ir_pmops,
},
.probe = img_ir_probe,
.remove_new = img_ir_remove,
};
module_platform_driver(img_ir_driver);
MODULE_AUTHOR("Imagination Technologies Ltd.");
MODULE_DESCRIPTION("ImgTec IR");
MODULE_LICENSE("GPL");
| linux-master | drivers/media/rc/img-ir/img-ir-core.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* ImgTec IR Decoder setup for Sharp protocol.
*
* Copyright 2012-2014 Imagination Technologies Ltd.
*/
#include "img-ir-hw.h"
/* Convert Sharp data to a scancode */
static int img_ir_sharp_scancode(int len, u64 raw, u64 enabled_protocols,
struct img_ir_scancode_req *request)
{
unsigned int addr, cmd, exp, chk;
if (len != 15)
return -EINVAL;
addr = (raw >> 0) & 0x1f;
cmd = (raw >> 5) & 0xff;
exp = (raw >> 13) & 0x1;
chk = (raw >> 14) & 0x1;
/* validate data */
if (!exp)
return -EINVAL;
if (chk)
/* probably the second half of the message */
return -EINVAL;
request->protocol = RC_PROTO_SHARP;
request->scancode = addr << 8 | cmd;
return IMG_IR_SCANCODE;
}
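/*
 * Illustrative worked example (not part of the original source): a 15-bit
 * raw value of 0x2d01 decodes as addr = 0x01, cmd = 0x68, exp = 1, chk = 0,
 * so it passes both checks and is reported as SHARP scancode 0x0168.
 */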
/* Convert Sharp scancode to Sharp data filter */
static int img_ir_sharp_filter(const struct rc_scancode_filter *in,
struct img_ir_filter *out, u64 protocols)
{
unsigned int addr, cmd, exp = 0, chk = 0;
unsigned int addr_m, cmd_m, exp_m = 0, chk_m = 0;
addr = (in->data >> 8) & 0x1f;
addr_m = (in->mask >> 8) & 0x1f;
cmd = (in->data >> 0) & 0xff;
cmd_m = (in->mask >> 0) & 0xff;
if (cmd_m) {
/* if filtering commands, we can only match the first part */
exp = 1;
exp_m = 1;
chk = 0;
chk_m = 1;
}
out->data = addr |
cmd << 5 |
exp << 13 |
chk << 14;
out->mask = addr_m |
cmd_m << 5 |
exp_m << 13 |
chk_m << 14;
return 0;
}
/*
* Sharp decoder
* See also http://www.sbprojects.com/knowledge/ir/sharp.php
*/
struct img_ir_decoder img_ir_sharp = {
.type = RC_PROTO_BIT_SHARP,
.control = {
.decoden = 0,
.decodend2 = 1,
.code_type = IMG_IR_CODETYPE_PULSEDIST,
.d1validsel = 1,
},
/* main timings */
.tolerance = 20, /* 20% */
.timings = {
/* 0 symbol */
.s10 = {
.pulse = { 320 /* 320 us */ },
.space = { 680 /* 1 ms period */ },
},
/* 1 symbol */
.s11 = {
.pulse = { 320 /* 320 us */ },
.space = { 1680 /* 2 ms period */ },
},
/* free time */
.ft = {
.minlen = 15,
.maxlen = 15,
.ft_min = 5000, /* 5 ms */
},
},
/* scancode logic */
.scancode = img_ir_sharp_scancode,
.filter = img_ir_sharp_filter,
};
| linux-master | drivers/media/rc/img-ir/img-ir-sharp.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* ImgTec IR Decoder setup for JVC protocol.
*
* Copyright 2012-2014 Imagination Technologies Ltd.
*/
#include "img-ir-hw.h"
/* Convert JVC data to a scancode */
static int img_ir_jvc_scancode(int len, u64 raw, u64 enabled_protocols,
struct img_ir_scancode_req *request)
{
unsigned int cust, data;
if (len != 16)
return -EINVAL;
cust = (raw >> 0) & 0xff;
data = (raw >> 8) & 0xff;
request->protocol = RC_PROTO_JVC;
request->scancode = cust << 8 | data;
return IMG_IR_SCANCODE;
}
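/*
 * Illustrative worked example (not part of the original source): a 16-bit
 * raw value of 0x7303 gives cust = 0x03 and data = 0x73, so the reported
 * JVC scancode is 0x0373.
 */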
/* Convert JVC scancode to JVC data filter */
static int img_ir_jvc_filter(const struct rc_scancode_filter *in,
struct img_ir_filter *out, u64 protocols)
{
unsigned int cust, data;
unsigned int cust_m, data_m;
cust = (in->data >> 8) & 0xff;
cust_m = (in->mask >> 8) & 0xff;
data = (in->data >> 0) & 0xff;
data_m = (in->mask >> 0) & 0xff;
out->data = cust | data << 8;
out->mask = cust_m | data_m << 8;
return 0;
}
/*
* JVC decoder
* See also http://www.sbprojects.com/knowledge/ir/jvc.php
* http://support.jvc.com/consumer/support/documents/RemoteCodes.pdf
*/
struct img_ir_decoder img_ir_jvc = {
.type = RC_PROTO_BIT_JVC,
.control = {
.decoden = 1,
.code_type = IMG_IR_CODETYPE_PULSEDIST,
},
/* main timings */
.unit = 527500, /* 527.5 us */
.timings = {
/* leader symbol */
.ldr = {
.pulse = { 16 /* 8.44 ms */ },
.space = { 8 /* 4.22 ms */ },
},
/* 0 symbol */
.s00 = {
.pulse = { 1 /* 527.5 us +-60 us */ },
.space = { 1 /* 527.5 us */ },
},
/* 1 symbol */
.s01 = {
.pulse = { 1 /* 527.5 us +-60 us */ },
.space = { 3 /* 1.5825 ms +-40 us */ },
},
/* free time */
.ft = {
.minlen = 16,
.maxlen = 16,
.ft_min = 10, /* 5.275 ms */
},
},
/* scancode logic */
.scancode = img_ir_jvc_scancode,
.filter = img_ir_jvc_filter,
};
| linux-master | drivers/media/rc/img-ir/img-ir-jvc.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* ImgTec IR Raw Decoder found in PowerDown Controller.
*
* Copyright 2010-2014 Imagination Technologies Ltd.
*
* This ties into the input subsystem using the RC-core in raw mode. Raw IR
* signal edges are reported and decoded by generic software decoders.
*/
#include <linux/spinlock.h>
#include <media/rc-core.h>
#include "img-ir.h"
#define ECHO_TIMEOUT_MS 150 /* ms between echos */
/* must be called with priv->lock held */
static void img_ir_refresh_raw(struct img_ir_priv *priv, u32 irq_status)
{
struct img_ir_priv_raw *raw = &priv->raw;
struct rc_dev *rc_dev = priv->raw.rdev;
int multiple;
u32 ir_status;
/* find whether both rise and fall was detected */
multiple = ((irq_status & IMG_IR_IRQ_EDGE) == IMG_IR_IRQ_EDGE);
/*
* If so, we need to see if the level has actually changed.
* If it's just noise that we didn't have time to process,
* there's no point reporting it.
*/
ir_status = img_ir_read(priv, IMG_IR_STATUS) & IMG_IR_IRRXD;
if (multiple && ir_status == raw->last_status)
return;
raw->last_status = ir_status;
/* report the edge to the IR raw decoders */
if (ir_status) /* low */
ir_raw_event_store_edge(rc_dev, false);
else /* high */
ir_raw_event_store_edge(rc_dev, true);
ir_raw_event_handle(rc_dev);
}
/* called with priv->lock held */
void img_ir_isr_raw(struct img_ir_priv *priv, u32 irq_status)
{
struct img_ir_priv_raw *raw = &priv->raw;
/* check not removing */
if (!raw->rdev)
return;
img_ir_refresh_raw(priv, irq_status);
/* start / push back the echo timer */
mod_timer(&raw->timer, jiffies + msecs_to_jiffies(ECHO_TIMEOUT_MS));
}
/*
* Echo timer callback function.
* The raw decoders expect to get a final sample even if there are no edges, in
* order to be assured of the final space. If there are no edges for a certain
* time we use this timer to emit a final sample to satisfy them.
*/
static void img_ir_echo_timer(struct timer_list *t)
{
struct img_ir_priv *priv = from_timer(priv, t, raw.timer);
spin_lock_irq(&priv->lock);
/* check not removing */
if (priv->raw.rdev)
/*
* It's safe to pass irq_status=0 since it's only used to check
* for double edges.
*/
img_ir_refresh_raw(priv, 0);
spin_unlock_irq(&priv->lock);
}
void img_ir_setup_raw(struct img_ir_priv *priv)
{
u32 irq_en;
if (!priv->raw.rdev)
return;
/* clear and enable edge interrupts */
spin_lock_irq(&priv->lock);
irq_en = img_ir_read(priv, IMG_IR_IRQ_ENABLE);
irq_en |= IMG_IR_IRQ_EDGE;
img_ir_write(priv, IMG_IR_IRQ_CLEAR, IMG_IR_IRQ_EDGE);
img_ir_write(priv, IMG_IR_IRQ_ENABLE, irq_en);
spin_unlock_irq(&priv->lock);
}
int img_ir_probe_raw(struct img_ir_priv *priv)
{
struct img_ir_priv_raw *raw = &priv->raw;
struct rc_dev *rdev;
int error;
/* Set up the echo timer */
timer_setup(&raw->timer, img_ir_echo_timer, 0);
/* Allocate raw decoder */
raw->rdev = rdev = rc_allocate_device(RC_DRIVER_IR_RAW);
if (!rdev) {
dev_err(priv->dev, "cannot allocate raw input device\n");
return -ENOMEM;
}
rdev->priv = priv;
rdev->map_name = RC_MAP_EMPTY;
rdev->device_name = "IMG Infrared Decoder Raw";
/* Register raw decoder */
error = rc_register_device(rdev);
if (error) {
dev_err(priv->dev, "failed to register raw IR input device\n");
rc_free_device(rdev);
raw->rdev = NULL;
return error;
}
return 0;
}
void img_ir_remove_raw(struct img_ir_priv *priv)
{
struct img_ir_priv_raw *raw = &priv->raw;
struct rc_dev *rdev = raw->rdev;
u32 irq_en;
if (!rdev)
return;
/* switch off and disable raw (edge) interrupts */
spin_lock_irq(&priv->lock);
raw->rdev = NULL;
irq_en = img_ir_read(priv, IMG_IR_IRQ_ENABLE);
irq_en &= ~IMG_IR_IRQ_EDGE;
img_ir_write(priv, IMG_IR_IRQ_ENABLE, irq_en);
img_ir_write(priv, IMG_IR_IRQ_CLEAR, IMG_IR_IRQ_EDGE);
spin_unlock_irq(&priv->lock);
rc_unregister_device(rdev);
del_timer_sync(&raw->timer);
}
| linux-master | drivers/media/rc/img-ir/img-ir-raw.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* ImgTec IR Decoder setup for NEC protocol.
*
* Copyright 2010-2014 Imagination Technologies Ltd.
*/
#include "img-ir-hw.h"
#include <linux/bitrev.h>
#include <linux/log2.h>
/* Convert NEC data to a scancode */
static int img_ir_nec_scancode(int len, u64 raw, u64 enabled_protocols,
struct img_ir_scancode_req *request)
{
unsigned int addr, addr_inv, data, data_inv;
/* a repeat code has no data */
if (!len)
return IMG_IR_REPEATCODE;
if (len != 32)
return -EINVAL;
/* raw encoding: ddDDaaAA */
addr = (raw >> 0) & 0xff;
addr_inv = (raw >> 8) & 0xff;
data = (raw >> 16) & 0xff;
data_inv = (raw >> 24) & 0xff;
if ((data_inv ^ data) != 0xff) {
/* 32-bit NEC (used by Apple and TiVo remotes) */
/* scan encoding: as transmitted, MSBit = first received bit */
request->scancode = bitrev8(addr) << 24 |
bitrev8(addr_inv) << 16 |
bitrev8(data) << 8 |
bitrev8(data_inv);
request->protocol = RC_PROTO_NEC32;
} else if ((addr_inv ^ addr) != 0xff) {
/* Extended NEC */
/* scan encoding: AAaaDD */
request->scancode = addr << 16 |
addr_inv << 8 |
data;
request->protocol = RC_PROTO_NECX;
} else {
/* Normal NEC */
/* scan encoding: AADD */
request->scancode = addr << 8 |
data;
request->protocol = RC_PROTO_NEC;
}
return IMG_IR_SCANCODE;
}
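/*
 * Illustrative worked example (not part of the original source): a raw
 * value of 0xed12bf40 gives addr = 0x40, addr_inv = 0xbf, data = 0x12,
 * data_inv = 0xed. Both checks hold (addr ^ addr_inv == 0xff and
 * data ^ data_inv == 0xff), so this is plain NEC with scancode 0x4012.
 */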
/* Convert NEC scancode to NEC data filter */
static int img_ir_nec_filter(const struct rc_scancode_filter *in,
struct img_ir_filter *out, u64 protocols)
{
unsigned int addr, addr_inv, data, data_inv;
unsigned int addr_m, addr_inv_m, data_m, data_inv_m;
data = in->data & 0xff;
data_m = in->mask & 0xff;
protocols &= RC_PROTO_BIT_NEC | RC_PROTO_BIT_NECX | RC_PROTO_BIT_NEC32;
/*
* If only one bit is set, we were requested to do an exact
* protocol. This should be the case for wakeup filters; for
* normal filters, guess the protocol from the scancode.
*/
if (!is_power_of_2(protocols)) {
if ((in->data | in->mask) & 0xff000000)
protocols = RC_PROTO_BIT_NEC32;
else if ((in->data | in->mask) & 0x00ff0000)
protocols = RC_PROTO_BIT_NECX;
else
protocols = RC_PROTO_BIT_NEC;
}
if (protocols == RC_PROTO_BIT_NEC32) {
/* 32-bit NEC (used by Apple and TiVo remotes) */
/* scan encoding: as transmitted, MSBit = first received bit */
addr = bitrev8(in->data >> 24);
addr_m = bitrev8(in->mask >> 24);
addr_inv = bitrev8(in->data >> 16);
addr_inv_m = bitrev8(in->mask >> 16);
data = bitrev8(in->data >> 8);
data_m = bitrev8(in->mask >> 8);
data_inv = bitrev8(in->data >> 0);
data_inv_m = bitrev8(in->mask >> 0);
} else if (protocols == RC_PROTO_BIT_NECX) {
/* Extended NEC */
/* scan encoding AAaaDD */
addr = (in->data >> 16) & 0xff;
addr_m = (in->mask >> 16) & 0xff;
addr_inv = (in->data >> 8) & 0xff;
addr_inv_m = (in->mask >> 8) & 0xff;
data_inv = data ^ 0xff;
data_inv_m = data_m;
} else {
/* Normal NEC */
/* scan encoding: AADD */
addr = (in->data >> 8) & 0xff;
addr_m = (in->mask >> 8) & 0xff;
addr_inv = addr ^ 0xff;
addr_inv_m = addr_m;
data_inv = data ^ 0xff;
data_inv_m = data_m;
}
/* raw encoding: ddDDaaAA */
out->data = data_inv << 24 |
data << 16 |
addr_inv << 8 |
addr;
out->mask = data_inv_m << 24 |
data_m << 16 |
addr_inv_m << 8 |
addr_m;
return 0;
}
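/*
 * Illustrative worked example (not part of the original source), assuming
 * more than one NEC variant is enabled so the protocol is guessed from the
 * scancode: a plain NEC filter with in->data = 0x4012 and in->mask = 0xffff
 * produces out->data = 0xed12bf40 and out->mask = 0xffffffff, matching the
 * raw "ddDDaaAA" layout used by the scancode function above.
 */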
/*
* NEC decoder
* See also http://www.sbprojects.com/knowledge/ir/nec.php
* http://wiki.altium.com/display/ADOH/NEC+Infrared+Transmission+Protocol
*/
struct img_ir_decoder img_ir_nec = {
.type = RC_PROTO_BIT_NEC | RC_PROTO_BIT_NECX | RC_PROTO_BIT_NEC32,
.control = {
.decoden = 1,
.code_type = IMG_IR_CODETYPE_PULSEDIST,
},
/* main timings */
.unit = 562500, /* 562.5 us */
.timings = {
/* leader symbol */
.ldr = {
.pulse = { 16 /* 9ms */ },
.space = { 8 /* 4.5ms */ },
},
/* 0 symbol */
.s00 = {
.pulse = { 1 /* 562.5 us */ },
.space = { 1 /* 562.5 us */ },
},
/* 1 symbol */
.s01 = {
.pulse = { 1 /* 562.5 us */ },
.space = { 3 /* 1687.5 us */ },
},
/* free time */
.ft = {
.minlen = 32,
.maxlen = 32,
.ft_min = 10, /* 5.625 ms */
},
},
/* repeat codes */
.repeat = 108, /* 108 ms */
.rtimings = {
/* leader symbol */
.ldr = {
.space = { 4 /* 2.25 ms */ },
},
/* free time */
.ft = {
.minlen = 0, /* repeat code has no data */
.maxlen = 0,
},
},
/* scancode logic */
.scancode = img_ir_nec_scancode,
.filter = img_ir_nec_filter,
};
| linux-master | drivers/media/rc/img-ir/img-ir-nec.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* ImgTec IR Decoder setup for Sanyo protocol.
*
* Copyright 2012-2014 Imagination Technologies Ltd.
*
* From ir-sanyo-decoder.c:
*
* This protocol uses the NEC protocol timings. However, data is formatted as:
* 13 bits Custom Code
* 13 bits NOT(Custom Code)
* 8 bits Key data
* 8 bits NOT(Key data)
*
* According with LIRC, this protocol is used on Sanyo, Aiwa and Chinon
* Information for this protocol is available at the Sanyo LC7461 datasheet.
*/
#include "img-ir-hw.h"
/* Convert Sanyo data to a scancode */
static int img_ir_sanyo_scancode(int len, u64 raw, u64 enabled_protocols,
struct img_ir_scancode_req *request)
{
unsigned int addr, addr_inv, data, data_inv;
/* a repeat code has no data */
if (!len)
return IMG_IR_REPEATCODE;
if (len != 42)
return -EINVAL;
addr = (raw >> 0) & 0x1fff;
addr_inv = (raw >> 13) & 0x1fff;
data = (raw >> 26) & 0xff;
data_inv = (raw >> 34) & 0xff;
/* Validate data */
if ((data_inv ^ data) != 0xff)
return -EINVAL;
/* Validate address */
if ((addr_inv ^ addr) != 0x1fff)
return -EINVAL;
/* Normal Sanyo */
request->protocol = RC_PROTO_SANYO;
request->scancode = addr << 8 | data;
return IMG_IR_SCANCODE;
}
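/*
 * Illustrative worked example (not part of the original source): a 42-bit
 * frame carrying addr = 0x0155 and data = 0x37 (with addr_inv = 0x1eaa and
 * data_inv = 0xc8 in the upper bits) passes both checks and is reported as
 * SANYO scancode 0x15537.
 */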
/* Convert Sanyo scancode to Sanyo data filter */
static int img_ir_sanyo_filter(const struct rc_scancode_filter *in,
struct img_ir_filter *out, u64 protocols)
{
unsigned int addr, addr_inv, data, data_inv;
unsigned int addr_m, data_m;
data = in->data & 0xff;
data_m = in->mask & 0xff;
data_inv = data ^ 0xff;
if (in->data & 0xff700000)
return -EINVAL;
addr = (in->data >> 8) & 0x1fff;
addr_m = (in->mask >> 8) & 0x1fff;
addr_inv = addr ^ 0x1fff;
out->data = (u64)data_inv << 34 |
(u64)data << 26 |
addr_inv << 13 |
addr;
out->mask = (u64)data_m << 34 |
(u64)data_m << 26 |
addr_m << 13 |
addr_m;
return 0;
}
/* Sanyo decoder */
struct img_ir_decoder img_ir_sanyo = {
.type = RC_PROTO_BIT_SANYO,
.control = {
.decoden = 1,
.code_type = IMG_IR_CODETYPE_PULSEDIST,
},
/* main timings */
.unit = 562500, /* 562.5 us */
.timings = {
/* leader symbol */
.ldr = {
.pulse = { 16 /* 9ms */ },
.space = { 8 /* 4.5ms */ },
},
/* 0 symbol */
.s00 = {
.pulse = { 1 /* 562.5 us */ },
.space = { 1 /* 562.5 us */ },
},
/* 1 symbol */
.s01 = {
.pulse = { 1 /* 562.5 us */ },
.space = { 3 /* 1687.5 us */ },
},
/* free time */
.ft = {
.minlen = 42,
.maxlen = 42,
.ft_min = 10, /* 5.625 ms */
},
},
/* repeat codes */
.repeat = 108, /* 108 ms */
.rtimings = {
/* leader symbol */
.ldr = {
.space = { 4 /* 2.25 ms */ },
},
/* free time */
.ft = {
.minlen = 0, /* repeat code has no data */
.maxlen = 0,
},
},
/* scancode logic */
.scancode = img_ir_sanyo_scancode,
.filter = img_ir_sanyo_filter,
};
| linux-master | drivers/media/rc/img-ir/img-ir-sanyo.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* ImgTec IR Decoder setup for Sony (SIRC) protocol.
*
* Copyright 2012-2014 Imagination Technologies Ltd.
*/
#include "img-ir-hw.h"
/* Convert Sony data to a scancode */
static int img_ir_sony_scancode(int len, u64 raw, u64 enabled_protocols,
struct img_ir_scancode_req *request)
{
unsigned int dev, subdev, func;
switch (len) {
case 12:
if (!(enabled_protocols & RC_PROTO_BIT_SONY12))
return -EINVAL;
func = raw & 0x7f; /* first 7 bits */
raw >>= 7;
dev = raw & 0x1f; /* next 5 bits */
subdev = 0;
request->protocol = RC_PROTO_SONY12;
break;
case 15:
if (!(enabled_protocols & RC_PROTO_BIT_SONY15))
return -EINVAL;
func = raw & 0x7f; /* first 7 bits */
raw >>= 7;
dev = raw & 0xff; /* next 8 bits */
subdev = 0;
request->protocol = RC_PROTO_SONY15;
break;
case 20:
if (!(enabled_protocols & RC_PROTO_BIT_SONY20))
return -EINVAL;
func = raw & 0x7f; /* first 7 bits */
raw >>= 7;
dev = raw & 0x1f; /* next 5 bits */
raw >>= 5;
subdev = raw & 0xff; /* next 8 bits */
request->protocol = RC_PROTO_SONY20;
break;
default:
return -EINVAL;
}
request->scancode = dev << 16 | subdev << 8 | func;
return IMG_IR_SCANCODE;
}
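/*
 * Illustrative worked example (not part of the original source): a 12-bit
 * raw value of 0x095 gives func = 0x15 and dev = 0x01 (subdev = 0), so the
 * reported SONY12 scancode is 0x10015.
 */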
/* Convert NEC scancode to NEC data filter */
static int img_ir_sony_filter(const struct rc_scancode_filter *in,
struct img_ir_filter *out, u64 protocols)
{
unsigned int dev, subdev, func;
unsigned int dev_m, subdev_m, func_m;
unsigned int len = 0;
dev = (in->data >> 16) & 0xff;
dev_m = (in->mask >> 16) & 0xff;
subdev = (in->data >> 8) & 0xff;
subdev_m = (in->mask >> 8) & 0xff;
func = (in->data >> 0) & 0x7f;
func_m = (in->mask >> 0) & 0x7f;
protocols &= RC_PROTO_BIT_SONY12 | RC_PROTO_BIT_SONY15 |
RC_PROTO_BIT_SONY20;
/*
* If only one bit is set, we were requested to do an exact
* protocol. This should be the case for wakeup filters; for
* normal filters, guess the protocol from the scancode.
*/
if (!is_power_of_2(protocols)) {
if (subdev & subdev_m)
protocols = RC_PROTO_BIT_SONY20;
else if (dev & dev_m & 0xe0)
protocols = RC_PROTO_BIT_SONY15;
else
protocols = RC_PROTO_BIT_SONY12;
}
if (protocols == RC_PROTO_BIT_SONY20) {
/* can't encode subdev and higher device bits */
if (dev & dev_m & 0xe0)
return -EINVAL;
len = 20;
dev_m &= 0x1f;
} else if (protocols == RC_PROTO_BIT_SONY15) {
len = 15;
subdev_m = 0;
} else {
/*
* The hardware mask cannot distinguish high device bits and low
* extended bits, so logically AND those bits of the masks
* together.
*/
subdev_m &= (dev_m >> 5) | 0xf8;
dev_m &= 0x1f;
}
/* ensure there aren't any bits straying between fields */
dev &= dev_m;
subdev &= subdev_m;
/* write the hardware filter */
out->data = func |
dev << 7 |
subdev << 15;
out->mask = func_m |
dev_m << 7 |
subdev_m << 15;
if (len) {
out->minlen = len;
out->maxlen = len;
}
return 0;
}
/*
* Sony SIRC decoder
* See also http://www.sbprojects.com/knowledge/ir/sirc.php
* http://picprojects.org.uk/projects/sirc/sonysirc.pdf
*/
struct img_ir_decoder img_ir_sony = {
.type = RC_PROTO_BIT_SONY12 | RC_PROTO_BIT_SONY15 | RC_PROTO_BIT_SONY20,
.control = {
.decoden = 1,
.code_type = IMG_IR_CODETYPE_PULSELEN,
},
/* main timings */
.unit = 600000, /* 600 us */
.timings = {
/* leader symbol */
.ldr = {
.pulse = { 4 /* 2.4 ms */ },
.space = { 1 /* 600 us */ },
},
/* 0 symbol */
.s00 = {
.pulse = { 1 /* 600 us */ },
.space = { 1 /* 600 us */ },
},
/* 1 symbol */
.s01 = {
.pulse = { 2 /* 1.2 ms */ },
.space = { 1 /* 600 us */ },
},
/* free time */
.ft = {
.minlen = 12,
.maxlen = 20,
.ft_min = 10, /* 6 ms */
},
},
/* scancode logic */
.scancode = img_ir_sony_scancode,
.filter = img_ir_sony_filter,
};
| linux-master | drivers/media/rc/img-ir/img-ir-sony.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* ImgTec IR Decoder setup for Philips RC-6 protocol.
*
* Copyright 2012-2014 Imagination Technologies Ltd.
*/
#include "img-ir-hw.h"
/* Convert RC6 data to a scancode */
static int img_ir_rc6_scancode(int len, u64 raw, u64 enabled_protocols,
struct img_ir_scancode_req *request)
{
unsigned int addr, cmd, mode, trl1, trl2;
/*
* Due to a side effect of the decoder handling the double length
* Trailer bit, the header information is a bit scrambled, and the
* raw data is shifted incorrectly.
* This workaround effectively recovers the header bits.
*
* The Header field should look like this:
*
* StartBit ModeBit2 ModeBit1 ModeBit0 TrailerBit
*
* But what we get is:
*
* ModeBit2 ModeBit1 ModeBit0 TrailerBit1 TrailerBit2
*
* The start bit is not important to recover the scancode.
*/
raw >>= 27;
trl1 = (raw >> 17) & 0x01;
trl2 = (raw >> 16) & 0x01;
mode = (raw >> 18) & 0x07;
addr = (raw >> 8) & 0xff;
cmd = raw & 0xff;
/*
* Due to the above explained irregularity the trailer bits cannot
* have the same value.
*/
if (trl1 == trl2)
return -EINVAL;
/* Only mode 0 supported for now */
if (mode)
return -EINVAL;
request->protocol = RC_PROTO_RC6_0;
request->scancode = addr << 8 | cmd;
request->toggle = trl2;
return IMG_IR_SCANCODE;
}
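/*
 * Illustrative worked example (not part of the original source): after the
 * "raw >>= 27" shift, a value of 0x2000c gives trl1 = 1, trl2 = 0, mode = 0,
 * addr = 0x00 and cmd = 0x0c, so the reported RC6-0 scancode is 0x000c with
 * toggle = 0.
 */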
/* Convert RC6 scancode to RC6 data filter */
static int img_ir_rc6_filter(const struct rc_scancode_filter *in,
struct img_ir_filter *out, u64 protocols)
{
/* Not supported by the hw. */
return -EINVAL;
}
/*
* RC-6 decoder
* see http://www.sbprojects.com/knowledge/ir/rc6.php
*/
struct img_ir_decoder img_ir_rc6 = {
.type = RC_PROTO_BIT_RC6_0,
.control = {
.bitorien = 1,
.code_type = IMG_IR_CODETYPE_BIPHASE,
.decoden = 1,
.decodinpol = 1,
},
/* main timings */
.tolerance = 20,
/*
* Due to a quirk in the img-ir decoder, default header values do
* not work, the values described below were extracted from
* successful RTL test cases.
*/
.timings = {
/* leader symbol */
.ldr = {
.pulse = { 650 },
.space = { 660 },
},
/* 0 symbol */
.s00 = {
.pulse = { 370 },
.space = { 370 },
},
/* 01 symbol */
.s01 = {
.pulse = { 370 },
.space = { 370 },
},
/* free time */
.ft = {
.minlen = 21,
.maxlen = 21,
.ft_min = 2666, /* 2.666 ms */
},
},
/* scancode logic */
.scancode = img_ir_rc6_scancode,
.filter = img_ir_rc6_filter,
};
| linux-master | drivers/media/rc/img-ir/img-ir-rc6.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* ImgTec IR Hardware Decoder found in PowerDown Controller.
*
* Copyright 2010-2014 Imagination Technologies Ltd.
*
* This ties into the input subsystem using the RC-core. Protocol support is
* provided in separate modules which provide the parameters and scancode
* translation functions to set up the hardware decoder and interpret the
* resulting input.
*/
#include <linux/bitops.h>
#include <linux/clk.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/timer.h>
#include <media/rc-core.h>
#include "img-ir.h"
/* Decoders lock (only modified to preprocess them) */
static DEFINE_SPINLOCK(img_ir_decoders_lock);
static bool img_ir_decoders_preprocessed;
static struct img_ir_decoder *img_ir_decoders[] = {
#ifdef CONFIG_IR_IMG_NEC
&img_ir_nec,
#endif
#ifdef CONFIG_IR_IMG_JVC
&img_ir_jvc,
#endif
#ifdef CONFIG_IR_IMG_SONY
&img_ir_sony,
#endif
#ifdef CONFIG_IR_IMG_SHARP
&img_ir_sharp,
#endif
#ifdef CONFIG_IR_IMG_SANYO
&img_ir_sanyo,
#endif
#ifdef CONFIG_IR_IMG_RC5
&img_ir_rc5,
#endif
#ifdef CONFIG_IR_IMG_RC6
&img_ir_rc6,
#endif
NULL
};
#define IMG_IR_F_FILTER BIT(RC_FILTER_NORMAL) /* enable filtering */
#define IMG_IR_F_WAKE BIT(RC_FILTER_WAKEUP) /* enable waking */
/* code type quirks */
#define IMG_IR_QUIRK_CODE_BROKEN 0x1 /* Decode is broken */
#define IMG_IR_QUIRK_CODE_LEN_INCR 0x2 /* Bit length needs increment */
/*
* The decoder generates rapid interrupts without actually having
* received any new data after an incomplete IR code is decoded.
*/
#define IMG_IR_QUIRK_CODE_IRQ 0x4
/* functions for preprocessing timings, ensuring max is set */
static void img_ir_timing_preprocess(struct img_ir_timing_range *range,
unsigned int unit)
{
if (range->max < range->min)
range->max = range->min;
if (unit) {
/* multiply by unit and convert to microseconds */
range->min = (range->min*unit)/1000;
range->max = (range->max*unit + 999)/1000; /* round up */
}
}
static void img_ir_symbol_timing_preprocess(struct img_ir_symbol_timing *timing,
unsigned int unit)
{
img_ir_timing_preprocess(&timing->pulse, unit);
img_ir_timing_preprocess(&timing->space, unit);
}
static void img_ir_timings_preprocess(struct img_ir_timings *timings,
unsigned int unit)
{
img_ir_symbol_timing_preprocess(&timings->ldr, unit);
img_ir_symbol_timing_preprocess(&timings->s00, unit);
img_ir_symbol_timing_preprocess(&timings->s01, unit);
img_ir_symbol_timing_preprocess(&timings->s10, unit);
img_ir_symbol_timing_preprocess(&timings->s11, unit);
/* default s10 and s11 to s00 and s01 if no leader */
if (unit)
/* multiply by unit and convert to microseconds (round up) */
timings->ft.ft_min = (timings->ft.ft_min*unit + 999)/1000;
}
/* functions for filling empty fields with defaults */
static void img_ir_timing_defaults(struct img_ir_timing_range *range,
struct img_ir_timing_range *defaults)
{
if (!range->min)
range->min = defaults->min;
if (!range->max)
range->max = defaults->max;
}
static void img_ir_symbol_timing_defaults(struct img_ir_symbol_timing *timing,
struct img_ir_symbol_timing *defaults)
{
img_ir_timing_defaults(&timing->pulse, &defaults->pulse);
img_ir_timing_defaults(&timing->space, &defaults->space);
}
static void img_ir_timings_defaults(struct img_ir_timings *timings,
struct img_ir_timings *defaults)
{
img_ir_symbol_timing_defaults(&timings->ldr, &defaults->ldr);
img_ir_symbol_timing_defaults(&timings->s00, &defaults->s00);
img_ir_symbol_timing_defaults(&timings->s01, &defaults->s01);
img_ir_symbol_timing_defaults(&timings->s10, &defaults->s10);
img_ir_symbol_timing_defaults(&timings->s11, &defaults->s11);
if (!timings->ft.ft_min)
timings->ft.ft_min = defaults->ft.ft_min;
}
/* functions for converting timings to register values */
/**
* img_ir_control() - Convert control struct to control register value.
* @control: Control data
*
* Returns: The control register value equivalent of @control.
*/
static u32 img_ir_control(const struct img_ir_control *control)
{
u32 ctrl = control->code_type << IMG_IR_CODETYPE_SHIFT;
if (control->decoden)
ctrl |= IMG_IR_DECODEN;
if (control->hdrtog)
ctrl |= IMG_IR_HDRTOG;
if (control->ldrdec)
ctrl |= IMG_IR_LDRDEC;
if (control->decodinpol)
ctrl |= IMG_IR_DECODINPOL;
if (control->bitorien)
ctrl |= IMG_IR_BITORIEN;
if (control->d1validsel)
ctrl |= IMG_IR_D1VALIDSEL;
if (control->bitinv)
ctrl |= IMG_IR_BITINV;
if (control->decodend2)
ctrl |= IMG_IR_DECODEND2;
if (control->bitoriend2)
ctrl |= IMG_IR_BITORIEND2;
if (control->bitinvd2)
ctrl |= IMG_IR_BITINVD2;
return ctrl;
}
/**
* img_ir_timing_range_convert() - Convert microsecond range.
* @out: Output timing range in clock cycles with a shift.
* @in: Input timing range in microseconds.
* @tolerance: Tolerance as a fraction of 128 (roughly percent).
* @clock_hz: IR clock rate in Hz.
* @shift: Shift of output units.
*
* Converts min and max from microseconds to IR clock cycles, applies a
* tolerance, and shifts for the register, rounding in the right direction.
* Note that in and out can safely be the same object.
*/
static void img_ir_timing_range_convert(struct img_ir_timing_range *out,
const struct img_ir_timing_range *in,
unsigned int tolerance,
unsigned long clock_hz,
unsigned int shift)
{
unsigned int min = in->min;
unsigned int max = in->max;
/* add a tolerance */
min = min - (min*tolerance >> 7);
max = max + (max*tolerance >> 7);
/* convert from microseconds into clock cycles */
min = min*clock_hz / 1000000;
max = (max*clock_hz + 999999) / 1000000; /* round up */
/* apply shift and copy to output */
out->min = min >> shift;
out->max = (max + ((1 << shift) - 1)) >> shift; /* round up */
}
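/*
* Worked example (illustrative, input values chosen for exposition): the
* Sony SIRC 0-symbol pulse preprocesses to {600, 600} us. With the default
* tolerance of 10% (12/128), the fallback clock rate of 32768 Hz and
* shift = 1, the range widens to {544, 656} us, converts to {17, 22}
* clock cycles (max rounded up), and is shifted to an output range of
* {8, 11}.
*/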
/**
* img_ir_symbol_timing() - Convert symbol timing struct to register value.
* @timing: Symbol timing data
* @tolerance: Timing tolerance where 0-128 represents 0-100%
* @clock_hz: Frequency of source clock in Hz
* @pd_shift: Shift to apply to symbol period
* @w_shift: Shift to apply to symbol width
*
* Returns: Symbol timing register value based on arguments.
*/
static u32 img_ir_symbol_timing(const struct img_ir_symbol_timing *timing,
unsigned int tolerance,
unsigned long clock_hz,
unsigned int pd_shift,
unsigned int w_shift)
{
struct img_ir_timing_range hw_pulse, hw_period;
/* we calculate period in hw_period, then convert in place */
hw_period.min = timing->pulse.min + timing->space.min;
hw_period.max = timing->pulse.max + timing->space.max;
img_ir_timing_range_convert(&hw_period, &hw_period,
tolerance, clock_hz, pd_shift);
img_ir_timing_range_convert(&hw_pulse, &timing->pulse,
tolerance, clock_hz, w_shift);
/* construct register value */
return (hw_period.max << IMG_IR_PD_MAX_SHIFT) |
(hw_period.min << IMG_IR_PD_MIN_SHIFT) |
(hw_pulse.max << IMG_IR_W_MAX_SHIFT) |
(hw_pulse.min << IMG_IR_W_MIN_SHIFT);
}
/**
* img_ir_free_timing() - Convert free time timing struct to register value.
* @timing: Free symbol timing data
* @clock_hz: Source clock frequency in Hz
*
* Returns: Free symbol timing register value.
*/
static u32 img_ir_free_timing(const struct img_ir_free_timing *timing,
unsigned long clock_hz)
{
unsigned int minlen, maxlen, ft_min;
/* minlen is only 5 bits, so round minlen down to a multiple of 2 */
if (timing->minlen < 30)
minlen = timing->minlen & -2;
else
minlen = 30;
/* maxlen has a maximum value of 48; round maxlen up to a multiple of 2 */
if (timing->maxlen < 48)
maxlen = (timing->maxlen + 1) & -2;
else
maxlen = 48;
/* convert and shift ft_min, rounding upwards */
ft_min = (timing->ft_min*clock_hz + 999999) / 1000000;
ft_min = (ft_min + 7) >> 3;
/* construct register value */
return (maxlen << IMG_IR_MAXLEN_SHIFT) |
(minlen << IMG_IR_MINLEN_SHIFT) |
(ft_min << IMG_IR_FT_MIN_SHIFT);
}
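/*
* Worked example (illustrative): the Sony SIRC free time has minlen 12,
* maxlen 20 and ft_min of 10 units, i.e. 6000 us after preprocessing. At
* the fallback clock rate of 32768 Hz, minlen and maxlen stay at 12 and
* 20, and ft_min converts to 197 cycles (rounded up), then shifts down to
* 25 before being packed into the register value.
*/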
/**
* img_ir_free_timing_dynamic() - Update free time register value.
* @st_ft: Static free time register value from img_ir_free_timing.
* @filter: Current filter which may additionally restrict min/max len.
*
* Returns: Updated free time register value based on the current filter.
*/
static u32 img_ir_free_timing_dynamic(u32 st_ft, struct img_ir_filter *filter)
{
unsigned int minlen, maxlen, newminlen, newmaxlen;
/* round minlen, maxlen to multiple of 2 */
newminlen = filter->minlen & -2;
newmaxlen = (filter->maxlen + 1) & -2;
/* extract min/max len from register */
minlen = (st_ft & IMG_IR_MINLEN) >> IMG_IR_MINLEN_SHIFT;
maxlen = (st_ft & IMG_IR_MAXLEN) >> IMG_IR_MAXLEN_SHIFT;
/* if the new values are more restrictive, update the register value */
if (newminlen > minlen) {
st_ft &= ~IMG_IR_MINLEN;
st_ft |= newminlen << IMG_IR_MINLEN_SHIFT;
}
if (newmaxlen < maxlen) {
st_ft &= ~IMG_IR_MAXLEN;
st_ft |= newmaxlen << IMG_IR_MAXLEN_SHIFT;
}
return st_ft;
}
/**
* img_ir_timings_convert() - Convert timings to register values
* @regs: Output timing register values
* @timings: Input timing data
* @tolerance: Timing tolerance where 0-128 represents 0-100%
* @clock_hz: Source clock frequency in Hz
*/
static void img_ir_timings_convert(struct img_ir_timing_regvals *regs,
const struct img_ir_timings *timings,
unsigned int tolerance,
unsigned int clock_hz)
{
/* leader symbol timings are divided by 16 */
regs->ldr = img_ir_symbol_timing(&timings->ldr, tolerance, clock_hz,
4, 4);
/* other symbol timings, pd fields only are divided by 2 */
regs->s00 = img_ir_symbol_timing(&timings->s00, tolerance, clock_hz,
1, 0);
regs->s01 = img_ir_symbol_timing(&timings->s01, tolerance, clock_hz,
1, 0);
regs->s10 = img_ir_symbol_timing(&timings->s10, tolerance, clock_hz,
1, 0);
regs->s11 = img_ir_symbol_timing(&timings->s11, tolerance, clock_hz,
1, 0);
regs->ft = img_ir_free_timing(&timings->ft, clock_hz);
}
/**
* img_ir_decoder_preprocess() - Preprocess timings in decoder.
* @decoder: Decoder to be preprocessed.
*
* Ensures that the symbol timing ranges are valid with respect to ordering, and
* does some fixed conversion on them.
*/
static void img_ir_decoder_preprocess(struct img_ir_decoder *decoder)
{
/* default tolerance */
if (!decoder->tolerance)
decoder->tolerance = 10; /* percent */
/* and convert tolerance to fraction out of 128 */
decoder->tolerance = decoder->tolerance * 128 / 100;
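/*
* Worked example (illustrative): the default 10% becomes
* 10 * 128 / 100 = 12, so later stages apply it as (x * 12) >> 7,
* roughly a 9.4% margin.
*/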
/* fill in implicit fields */
img_ir_timings_preprocess(&decoder->timings, decoder->unit);
/* do the same for repeat timings if applicable */
if (decoder->repeat) {
img_ir_timings_preprocess(&decoder->rtimings, decoder->unit);
img_ir_timings_defaults(&decoder->rtimings, &decoder->timings);
}
}
/**
* img_ir_decoder_convert() - Generate internal timings in decoder.
* @decoder: Decoder to be converted to internal timings.
* @reg_timings: Timing register values.
* @clock_hz: IR clock rate in Hz.
*
* Fills out the repeat timings and timing register values for a specific clock
* rate.
*/
static void img_ir_decoder_convert(const struct img_ir_decoder *decoder,
struct img_ir_reg_timings *reg_timings,
unsigned int clock_hz)
{
/* calculate control value */
reg_timings->ctrl = img_ir_control(&decoder->control);
/* fill in implicit fields and calculate register values */
img_ir_timings_convert(&reg_timings->timings, &decoder->timings,
decoder->tolerance, clock_hz);
/* do the same for repeat timings if applicable */
if (decoder->repeat)
img_ir_timings_convert(&reg_timings->rtimings,
&decoder->rtimings, decoder->tolerance,
clock_hz);
}
/**
* img_ir_write_timings() - Write timings to the hardware now
* @priv: IR private data
* @regs: Timing register values to write
* @type: RC filter type (RC_FILTER_*)
*
* Write timing register values @regs to the hardware, taking into account the
* current filter which may impose restrictions on the length of the expected
* data.
*/
static void img_ir_write_timings(struct img_ir_priv *priv,
struct img_ir_timing_regvals *regs,
enum rc_filter_type type)
{
struct img_ir_priv_hw *hw = &priv->hw;
/* filter may be more restrictive to minlen, maxlen */
u32 ft = regs->ft;
if (hw->flags & BIT(type))
ft = img_ir_free_timing_dynamic(regs->ft, &hw->filters[type]);
/* write to registers */
img_ir_write(priv, IMG_IR_LEAD_SYMB_TIMING, regs->ldr);
img_ir_write(priv, IMG_IR_S00_SYMB_TIMING, regs->s00);
img_ir_write(priv, IMG_IR_S01_SYMB_TIMING, regs->s01);
img_ir_write(priv, IMG_IR_S10_SYMB_TIMING, regs->s10);
img_ir_write(priv, IMG_IR_S11_SYMB_TIMING, regs->s11);
img_ir_write(priv, IMG_IR_FREE_SYMB_TIMING, ft);
dev_dbg(priv->dev, "timings: ldr=%#x, s=[%#x, %#x, %#x, %#x], ft=%#x\n",
regs->ldr, regs->s00, regs->s01, regs->s10, regs->s11, ft);
}
static void img_ir_write_filter(struct img_ir_priv *priv,
struct img_ir_filter *filter)
{
if (filter) {
dev_dbg(priv->dev, "IR filter=%016llx & %016llx\n",
(unsigned long long)filter->data,
(unsigned long long)filter->mask);
img_ir_write(priv, IMG_IR_IRQ_MSG_DATA_LW, (u32)filter->data);
img_ir_write(priv, IMG_IR_IRQ_MSG_DATA_UP, (u32)(filter->data
>> 32));
img_ir_write(priv, IMG_IR_IRQ_MSG_MASK_LW, (u32)filter->mask);
img_ir_write(priv, IMG_IR_IRQ_MSG_MASK_UP, (u32)(filter->mask
>> 32));
} else {
dev_dbg(priv->dev, "IR clearing filter\n");
img_ir_write(priv, IMG_IR_IRQ_MSG_MASK_LW, 0);
img_ir_write(priv, IMG_IR_IRQ_MSG_MASK_UP, 0);
}
}
/* caller must have lock */
static void _img_ir_set_filter(struct img_ir_priv *priv,
struct img_ir_filter *filter)
{
struct img_ir_priv_hw *hw = &priv->hw;
u32 irq_en, irq_on;
irq_en = img_ir_read(priv, IMG_IR_IRQ_ENABLE);
if (filter) {
/* Only use the match interrupt */
hw->filters[RC_FILTER_NORMAL] = *filter;
hw->flags |= IMG_IR_F_FILTER;
irq_on = IMG_IR_IRQ_DATA_MATCH;
irq_en &= ~(IMG_IR_IRQ_DATA_VALID | IMG_IR_IRQ_DATA2_VALID);
} else {
/* Only use the valid interrupt */
hw->flags &= ~IMG_IR_F_FILTER;
irq_en &= ~IMG_IR_IRQ_DATA_MATCH;
irq_on = IMG_IR_IRQ_DATA_VALID | IMG_IR_IRQ_DATA2_VALID;
}
irq_en |= irq_on;
img_ir_write_filter(priv, filter);
/* clear any interrupts we're enabling so we don't handle old ones */
img_ir_write(priv, IMG_IR_IRQ_CLEAR, irq_on);
img_ir_write(priv, IMG_IR_IRQ_ENABLE, irq_en);
}
/* caller must have lock */
static void _img_ir_set_wake_filter(struct img_ir_priv *priv,
struct img_ir_filter *filter)
{
struct img_ir_priv_hw *hw = &priv->hw;
if (filter) {
/* Enable wake, and copy filter for later */
hw->filters[RC_FILTER_WAKEUP] = *filter;
hw->flags |= IMG_IR_F_WAKE;
} else {
/* Disable wake */
hw->flags &= ~IMG_IR_F_WAKE;
}
}
/* Callback for setting scancode filter */
static int img_ir_set_filter(struct rc_dev *dev, enum rc_filter_type type,
struct rc_scancode_filter *sc_filter)
{
struct img_ir_priv *priv = dev->priv;
struct img_ir_priv_hw *hw = &priv->hw;
struct img_ir_filter filter, *filter_ptr = &filter;
int ret = 0;
dev_dbg(priv->dev, "IR scancode %sfilter=%08x & %08x\n",
type == RC_FILTER_WAKEUP ? "wake " : "",
sc_filter->data,
sc_filter->mask);
spin_lock_irq(&priv->lock);
/* filtering can always be disabled */
if (!sc_filter->mask) {
filter_ptr = NULL;
goto set_unlock;
}
/* current decoder must support scancode filtering */
if (!hw->decoder || !hw->decoder->filter) {
ret = -EINVAL;
goto unlock;
}
/* convert scancode filter to raw filter */
filter.minlen = 0;
filter.maxlen = ~0;
if (type == RC_FILTER_NORMAL) {
/* guess scancode from protocol */
ret = hw->decoder->filter(sc_filter, &filter,
dev->enabled_protocols);
} else {
/* for wakeup user provided exact protocol variant */
ret = hw->decoder->filter(sc_filter, &filter,
1ULL << dev->wakeup_protocol);
}
if (ret)
goto unlock;
dev_dbg(priv->dev, "IR raw %sfilter=%016llx & %016llx\n",
type == RC_FILTER_WAKEUP ? "wake " : "",
(unsigned long long)filter.data,
(unsigned long long)filter.mask);
set_unlock:
/* apply raw filters */
switch (type) {
case RC_FILTER_NORMAL:
_img_ir_set_filter(priv, filter_ptr);
break;
case RC_FILTER_WAKEUP:
_img_ir_set_wake_filter(priv, filter_ptr);
break;
default:
ret = -EINVAL;
}
unlock:
spin_unlock_irq(&priv->lock);
return ret;
}
static int img_ir_set_normal_filter(struct rc_dev *dev,
struct rc_scancode_filter *sc_filter)
{
return img_ir_set_filter(dev, RC_FILTER_NORMAL, sc_filter);
}
static int img_ir_set_wakeup_filter(struct rc_dev *dev,
struct rc_scancode_filter *sc_filter)
{
return img_ir_set_filter(dev, RC_FILTER_WAKEUP, sc_filter);
}
/**
* img_ir_set_decoder() - Set the current decoder.
* @priv: IR private data.
* @decoder: Decoder to use with immediate effect.
* @proto: Protocol bitmap (or 0 to use decoder->type).
*/
static void img_ir_set_decoder(struct img_ir_priv *priv,
const struct img_ir_decoder *decoder,
u64 proto)
{
struct img_ir_priv_hw *hw = &priv->hw;
struct rc_dev *rdev = hw->rdev;
u32 ir_status, irq_en;
spin_lock_irq(&priv->lock);
/*
* First record that the protocol is being stopped so that the end timer
* isn't restarted while we're trying to stop it.
*/
hw->stopping = true;
/*
* Release the lock to stop the end timer, since the end timer handler
* acquires the lock and we don't want to deadlock waiting for it.
*/
spin_unlock_irq(&priv->lock);
del_timer_sync(&hw->end_timer);
del_timer_sync(&hw->suspend_timer);
spin_lock_irq(&priv->lock);
hw->stopping = false;
/* switch off and disable interrupts */
img_ir_write(priv, IMG_IR_CONTROL, 0);
irq_en = img_ir_read(priv, IMG_IR_IRQ_ENABLE);
img_ir_write(priv, IMG_IR_IRQ_ENABLE, irq_en & IMG_IR_IRQ_EDGE);
img_ir_write(priv, IMG_IR_IRQ_CLEAR, IMG_IR_IRQ_ALL & ~IMG_IR_IRQ_EDGE);
/* ack any data already detected */
ir_status = img_ir_read(priv, IMG_IR_STATUS);
if (ir_status & (IMG_IR_RXDVAL | IMG_IR_RXDVALD2)) {
ir_status &= ~(IMG_IR_RXDVAL | IMG_IR_RXDVALD2);
img_ir_write(priv, IMG_IR_STATUS, ir_status);
}
/* always read data to clear buffer if IR wakes the device */
img_ir_read(priv, IMG_IR_DATA_LW);
img_ir_read(priv, IMG_IR_DATA_UP);
/* switch back to normal mode */
hw->mode = IMG_IR_M_NORMAL;
/* clear the wakeup scancode filter */
rdev->scancode_wakeup_filter.data = 0;
rdev->scancode_wakeup_filter.mask = 0;
rdev->wakeup_protocol = RC_PROTO_UNKNOWN;
/* clear raw filters */
_img_ir_set_filter(priv, NULL);
_img_ir_set_wake_filter(priv, NULL);
/* clear the enabled protocols */
hw->enabled_protocols = 0;
/* switch decoder */
hw->decoder = decoder;
if (!decoder)
goto unlock;
/* set the enabled protocols */
if (!proto)
proto = decoder->type;
hw->enabled_protocols = proto;
/* write the new timings */
img_ir_decoder_convert(decoder, &hw->reg_timings, hw->clk_hz);
img_ir_write_timings(priv, &hw->reg_timings.timings, RC_FILTER_NORMAL);
/* set up and enable */
img_ir_write(priv, IMG_IR_CONTROL, hw->reg_timings.ctrl);
unlock:
spin_unlock_irq(&priv->lock);
}
/**
* img_ir_decoder_compatible() - Find whether a decoder will work with a device.
* @priv: IR private data.
* @dec: Decoder to check.
*
* Returns: true if @dec is compatible with the device @priv refers to.
*/
static bool img_ir_decoder_compatible(struct img_ir_priv *priv,
const struct img_ir_decoder *dec)
{
unsigned int ct;
/* don't accept decoders using code types which aren't supported */
ct = dec->control.code_type;
if (priv->hw.ct_quirks[ct] & IMG_IR_QUIRK_CODE_BROKEN)
return false;
return true;
}
/**
* img_ir_allowed_protos() - Get allowed protocols from global decoder list.
* @priv: IR private data.
*
* Returns: Mask of protocols supported by the device @priv refers to.
*/
static u64 img_ir_allowed_protos(struct img_ir_priv *priv)
{
u64 protos = 0;
struct img_ir_decoder **decp;
for (decp = img_ir_decoders; *decp; ++decp) {
const struct img_ir_decoder *dec = *decp;
if (img_ir_decoder_compatible(priv, dec))
protos |= dec->type;
}
return protos;
}
/* Callback for changing protocol using sysfs */
static int img_ir_change_protocol(struct rc_dev *dev, u64 *ir_type)
{
struct img_ir_priv *priv = dev->priv;
struct img_ir_priv_hw *hw = &priv->hw;
struct rc_dev *rdev = hw->rdev;
struct img_ir_decoder **decp;
u64 wakeup_protocols;
if (!*ir_type) {
/* disable all protocols */
img_ir_set_decoder(priv, NULL, 0);
goto success;
}
for (decp = img_ir_decoders; *decp; ++decp) {
const struct img_ir_decoder *dec = *decp;
if (!img_ir_decoder_compatible(priv, dec))
continue;
if (*ir_type & dec->type) {
*ir_type &= dec->type;
img_ir_set_decoder(priv, dec, *ir_type);
goto success;
}
}
return -EINVAL;
success:
/*
* Only allow matching wakeup protocols for now, and only if filtering
* is supported.
*/
wakeup_protocols = *ir_type;
if (!hw->decoder || !hw->decoder->filter)
wakeup_protocols = 0;
rdev->allowed_wakeup_protocols = wakeup_protocols;
return 0;
}
/* Changes ir-core protocol device attribute */
static void img_ir_set_protocol(struct img_ir_priv *priv, u64 proto)
{
struct rc_dev *rdev = priv->hw.rdev;
mutex_lock(&rdev->lock);
rdev->enabled_protocols = proto;
rdev->allowed_wakeup_protocols = proto;
mutex_unlock(&rdev->lock);
}
/* Set up IR decoders */
static void img_ir_init_decoders(void)
{
struct img_ir_decoder **decp;
spin_lock(&img_ir_decoders_lock);
if (!img_ir_decoders_preprocessed) {
for (decp = img_ir_decoders; *decp; ++decp)
img_ir_decoder_preprocess(*decp);
img_ir_decoders_preprocessed = true;
}
spin_unlock(&img_ir_decoders_lock);
}
#ifdef CONFIG_PM_SLEEP
/**
* img_ir_enable_wake() - Switch to wake mode.
* @priv: IR private data.
*
* Returns: non-zero if the IR can wake the system.
*/
static int img_ir_enable_wake(struct img_ir_priv *priv)
{
struct img_ir_priv_hw *hw = &priv->hw;
int ret = 0;
spin_lock_irq(&priv->lock);
if (hw->flags & IMG_IR_F_WAKE) {
/* interrupt only on a match */
hw->suspend_irqen = img_ir_read(priv, IMG_IR_IRQ_ENABLE);
img_ir_write(priv, IMG_IR_IRQ_ENABLE, IMG_IR_IRQ_DATA_MATCH);
img_ir_write_filter(priv, &hw->filters[RC_FILTER_WAKEUP]);
img_ir_write_timings(priv, &hw->reg_timings.timings,
RC_FILTER_WAKEUP);
hw->mode = IMG_IR_M_WAKE;
ret = 1;
}
spin_unlock_irq(&priv->lock);
return ret;
}
/**
* img_ir_disable_wake() - Switch out of wake mode.
* @priv: IR private data
*
* Returns: 1 if the hardware should be allowed to wake from a sleep state.
* 0 otherwise.
*/
static int img_ir_disable_wake(struct img_ir_priv *priv)
{
struct img_ir_priv_hw *hw = &priv->hw;
int ret = 0;
spin_lock_irq(&priv->lock);
if (hw->flags & IMG_IR_F_WAKE) {
/* restore normal filtering */
if (hw->flags & IMG_IR_F_FILTER) {
img_ir_write(priv, IMG_IR_IRQ_ENABLE,
(hw->suspend_irqen & IMG_IR_IRQ_EDGE) |
IMG_IR_IRQ_DATA_MATCH);
img_ir_write_filter(priv,
&hw->filters[RC_FILTER_NORMAL]);
} else {
img_ir_write(priv, IMG_IR_IRQ_ENABLE,
(hw->suspend_irqen & IMG_IR_IRQ_EDGE) |
IMG_IR_IRQ_DATA_VALID |
IMG_IR_IRQ_DATA2_VALID);
img_ir_write_filter(priv, NULL);
}
img_ir_write_timings(priv, &hw->reg_timings.timings,
RC_FILTER_NORMAL);
hw->mode = IMG_IR_M_NORMAL;
ret = 1;
}
spin_unlock_irq(&priv->lock);
return ret;
}
#endif /* CONFIG_PM_SLEEP */
/* lock must be held */
static void img_ir_begin_repeat(struct img_ir_priv *priv)
{
struct img_ir_priv_hw *hw = &priv->hw;
if (hw->mode == IMG_IR_M_NORMAL) {
/* switch to repeat timings */
img_ir_write(priv, IMG_IR_CONTROL, 0);
hw->mode = IMG_IR_M_REPEATING;
img_ir_write_timings(priv, &hw->reg_timings.rtimings,
RC_FILTER_NORMAL);
img_ir_write(priv, IMG_IR_CONTROL, hw->reg_timings.ctrl);
}
}
/* lock must be held */
static void img_ir_end_repeat(struct img_ir_priv *priv)
{
struct img_ir_priv_hw *hw = &priv->hw;
if (hw->mode == IMG_IR_M_REPEATING) {
/* switch to normal timings */
img_ir_write(priv, IMG_IR_CONTROL, 0);
hw->mode = IMG_IR_M_NORMAL;
img_ir_write_timings(priv, &hw->reg_timings.timings,
RC_FILTER_NORMAL);
img_ir_write(priv, IMG_IR_CONTROL, hw->reg_timings.ctrl);
}
}
/* lock must be held */
static void img_ir_handle_data(struct img_ir_priv *priv, u32 len, u64 raw)
{
struct img_ir_priv_hw *hw = &priv->hw;
const struct img_ir_decoder *dec = hw->decoder;
int ret = IMG_IR_SCANCODE;
struct img_ir_scancode_req request;
request.protocol = RC_PROTO_UNKNOWN;
request.toggle = 0;
if (dec->scancode)
ret = dec->scancode(len, raw, hw->enabled_protocols, &request);
else if (len >= 32)
request.scancode = (u32)raw;
else if (len < 32)
request.scancode = (u32)raw & ((1 << len)-1);
dev_dbg(priv->dev, "data (%u bits) = %#llx\n",
len, (unsigned long long)raw);
if (ret == IMG_IR_SCANCODE) {
dev_dbg(priv->dev, "decoded scan code %#x, toggle %u\n",
request.scancode, request.toggle);
rc_keydown(hw->rdev, request.protocol, request.scancode,
request.toggle);
img_ir_end_repeat(priv);
} else if (ret == IMG_IR_REPEATCODE) {
if (hw->mode == IMG_IR_M_REPEATING) {
dev_dbg(priv->dev, "decoded repeat code\n");
rc_repeat(hw->rdev);
} else {
dev_dbg(priv->dev, "decoded unexpected repeat code, ignoring\n");
}
} else {
dev_dbg(priv->dev, "decode failed (%d)\n", ret);
return;
}
/* we mustn't update the end timer while trying to stop it */
if (dec->repeat && !hw->stopping) {
unsigned long interval;
img_ir_begin_repeat(priv);
/* update timer, but allowing for 1/8th tolerance */
interval = dec->repeat + (dec->repeat >> 3);
mod_timer(&hw->end_timer,
jiffies + msecs_to_jiffies(interval));
}
}
/* timer function to end waiting for repeat. */
static void img_ir_end_timer(struct timer_list *t)
{
struct img_ir_priv *priv = from_timer(priv, t, hw.end_timer);
spin_lock_irq(&priv->lock);
img_ir_end_repeat(priv);
spin_unlock_irq(&priv->lock);
}
/*
* Timer function to re-enable the current protocol after it had been
* cleared when invalid interrupts were generated due to a quirk in the
* img-ir decoder.
*/
static void img_ir_suspend_timer(struct timer_list *t)
{
struct img_ir_priv *priv = from_timer(priv, t, hw.suspend_timer);
spin_lock_irq(&priv->lock);
/*
* Don't overwrite enabled valid/match IRQs if they have already been
* changed by e.g. a filter change.
*/
if ((priv->hw.quirk_suspend_irq & IMG_IR_IRQ_EDGE) ==
img_ir_read(priv, IMG_IR_IRQ_ENABLE))
img_ir_write(priv, IMG_IR_IRQ_ENABLE,
priv->hw.quirk_suspend_irq);
/* enable */
img_ir_write(priv, IMG_IR_CONTROL, priv->hw.reg_timings.ctrl);
spin_unlock_irq(&priv->lock);
}
#ifdef CONFIG_COMMON_CLK
static void img_ir_change_frequency(struct img_ir_priv *priv,
struct clk_notifier_data *change)
{
struct img_ir_priv_hw *hw = &priv->hw;
dev_dbg(priv->dev, "clk changed %lu HZ -> %lu HZ\n",
change->old_rate, change->new_rate);
spin_lock_irq(&priv->lock);
if (hw->clk_hz == change->new_rate)
goto unlock;
hw->clk_hz = change->new_rate;
/* refresh current timings */
if (hw->decoder) {
img_ir_decoder_convert(hw->decoder, &hw->reg_timings,
hw->clk_hz);
switch (hw->mode) {
case IMG_IR_M_NORMAL:
img_ir_write_timings(priv, &hw->reg_timings.timings,
RC_FILTER_NORMAL);
break;
case IMG_IR_M_REPEATING:
img_ir_write_timings(priv, &hw->reg_timings.rtimings,
RC_FILTER_NORMAL);
break;
#ifdef CONFIG_PM_SLEEP
case IMG_IR_M_WAKE:
img_ir_write_timings(priv, &hw->reg_timings.timings,
RC_FILTER_WAKEUP);
break;
#endif
}
}
unlock:
spin_unlock_irq(&priv->lock);
}
static int img_ir_clk_notify(struct notifier_block *self, unsigned long action,
void *data)
{
struct img_ir_priv *priv = container_of(self, struct img_ir_priv,
hw.clk_nb);
switch (action) {
case POST_RATE_CHANGE:
img_ir_change_frequency(priv, data);
break;
default:
break;
}
return NOTIFY_OK;
}
#endif /* CONFIG_COMMON_CLK */
/* called with priv->lock held */
void img_ir_isr_hw(struct img_ir_priv *priv, u32 irq_status)
{
struct img_ir_priv_hw *hw = &priv->hw;
u32 ir_status, len, lw, up;
unsigned int ct;
/* use the current decoder */
if (!hw->decoder)
return;
ct = hw->decoder->control.code_type;
ir_status = img_ir_read(priv, IMG_IR_STATUS);
if (!(ir_status & (IMG_IR_RXDVAL | IMG_IR_RXDVALD2))) {
if (!(priv->hw.ct_quirks[ct] & IMG_IR_QUIRK_CODE_IRQ) ||
hw->stopping)
return;
/*
* The functionality below is added as a workaround to stop the
* multiple interrupts generated when an incomplete IR code is
* received by the decoder.
* The decoder generates rapid interrupts without actually
* having received any new data. After a single interrupt it's
* expected to clear up, but instead multiple interrupts are
* rapidly generated. The only way to get out of this loop is to
* reset the control register after a short delay.
*/
img_ir_write(priv, IMG_IR_CONTROL, 0);
hw->quirk_suspend_irq = img_ir_read(priv, IMG_IR_IRQ_ENABLE);
img_ir_write(priv, IMG_IR_IRQ_ENABLE,
hw->quirk_suspend_irq & IMG_IR_IRQ_EDGE);
/* Timer activated to re-enable the protocol. */
mod_timer(&hw->suspend_timer,
jiffies + msecs_to_jiffies(5));
return;
}
ir_status &= ~(IMG_IR_RXDVAL | IMG_IR_RXDVALD2);
img_ir_write(priv, IMG_IR_STATUS, ir_status);
len = (ir_status & IMG_IR_RXDLEN) >> IMG_IR_RXDLEN_SHIFT;
/* some versions report wrong length for certain code types */
if (hw->ct_quirks[ct] & IMG_IR_QUIRK_CODE_LEN_INCR)
++len;
lw = img_ir_read(priv, IMG_IR_DATA_LW);
up = img_ir_read(priv, IMG_IR_DATA_UP);
img_ir_handle_data(priv, len, (u64)up << 32 | lw);
}
void img_ir_setup_hw(struct img_ir_priv *priv)
{
struct img_ir_decoder **decp;
if (!priv->hw.rdev)
return;
/* Use the first available decoder (or disable stuff if NULL) */
for (decp = img_ir_decoders; *decp; ++decp) {
const struct img_ir_decoder *dec = *decp;
if (img_ir_decoder_compatible(priv, dec)) {
img_ir_set_protocol(priv, dec->type);
img_ir_set_decoder(priv, dec, 0);
return;
}
}
img_ir_set_decoder(priv, NULL, 0);
}
/**
* img_ir_probe_hw_caps() - Probe capabilities of the hardware.
* @priv: IR private data.
*/
static void img_ir_probe_hw_caps(struct img_ir_priv *priv)
{
struct img_ir_priv_hw *hw = &priv->hw;
/*
* When a version of the block becomes available without these quirks,
* they'll have to depend on the core revision.
*/
hw->ct_quirks[IMG_IR_CODETYPE_PULSELEN]
|= IMG_IR_QUIRK_CODE_LEN_INCR;
hw->ct_quirks[IMG_IR_CODETYPE_BIPHASE]
|= IMG_IR_QUIRK_CODE_IRQ;
hw->ct_quirks[IMG_IR_CODETYPE_2BITPULSEPOS]
|= IMG_IR_QUIRK_CODE_BROKEN;
}
int img_ir_probe_hw(struct img_ir_priv *priv)
{
struct img_ir_priv_hw *hw = &priv->hw;
struct rc_dev *rdev;
int error;
/* Ensure hardware decoders have been preprocessed */
img_ir_init_decoders();
/* Probe hardware capabilities */
img_ir_probe_hw_caps(priv);
/* Set up the end timer */
timer_setup(&hw->end_timer, img_ir_end_timer, 0);
timer_setup(&hw->suspend_timer, img_ir_suspend_timer, 0);
/* Register a clock notifier */
if (!IS_ERR(priv->clk)) {
hw->clk_hz = clk_get_rate(priv->clk);
#ifdef CONFIG_COMMON_CLK
hw->clk_nb.notifier_call = img_ir_clk_notify;
error = clk_notifier_register(priv->clk, &hw->clk_nb);
if (error)
dev_warn(priv->dev,
"failed to register clock notifier\n");
#endif
} else {
hw->clk_hz = 32768;
}
/* Allocate hardware decoder */
hw->rdev = rdev = rc_allocate_device(RC_DRIVER_SCANCODE);
if (!rdev) {
dev_err(priv->dev, "cannot allocate input device\n");
error = -ENOMEM;
goto err_alloc_rc;
}
rdev->priv = priv;
rdev->map_name = RC_MAP_EMPTY;
rdev->allowed_protocols = img_ir_allowed_protos(priv);
rdev->device_name = "IMG Infrared Decoder";
rdev->s_filter = img_ir_set_normal_filter;
rdev->s_wakeup_filter = img_ir_set_wakeup_filter;
/* Register hardware decoder */
error = rc_register_device(rdev);
if (error) {
dev_err(priv->dev, "failed to register IR input device\n");
goto err_register_rc;
}
/*
* Set this after rc_register_device as no protocols have been
* registered yet.
*/
rdev->change_protocol = img_ir_change_protocol;
device_init_wakeup(priv->dev, 1);
return 0;
err_register_rc:
img_ir_set_decoder(priv, NULL, 0);
hw->rdev = NULL;
rc_free_device(rdev);
err_alloc_rc:
#ifdef CONFIG_COMMON_CLK
if (!IS_ERR(priv->clk))
clk_notifier_unregister(priv->clk, &hw->clk_nb);
#endif
return error;
}
void img_ir_remove_hw(struct img_ir_priv *priv)
{
struct img_ir_priv_hw *hw = &priv->hw;
struct rc_dev *rdev = hw->rdev;
if (!rdev)
return;
img_ir_set_decoder(priv, NULL, 0);
hw->rdev = NULL;
rc_unregister_device(rdev);
#ifdef CONFIG_COMMON_CLK
if (!IS_ERR(priv->clk))
clk_notifier_unregister(priv->clk, &hw->clk_nb);
#endif
}
#ifdef CONFIG_PM_SLEEP
int img_ir_suspend(struct device *dev)
{
struct img_ir_priv *priv = dev_get_drvdata(dev);
if (device_may_wakeup(dev) && img_ir_enable_wake(priv))
enable_irq_wake(priv->irq);
return 0;
}
int img_ir_resume(struct device *dev)
{
struct img_ir_priv *priv = dev_get_drvdata(dev);
if (device_may_wakeup(dev) && img_ir_disable_wake(priv))
disable_irq_wake(priv->irq);
return 0;
}
#endif /* CONFIG_PM_SLEEP */
| linux-master | drivers/media/rc/img-ir/img-ir-hw.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* ImgTec IR Decoder setup for Philips RC-5 protocol.
*
* Copyright 2012-2014 Imagination Technologies Ltd.
*/
#include "img-ir-hw.h"
/* Convert RC5 data to a scancode */
static int img_ir_rc5_scancode(int len, u64 raw, u64 enabled_protocols,
struct img_ir_scancode_req *request)
{
unsigned int addr, cmd, tgl, start;
/* Quirk in the decoder shifts everything by 2 to the left. */
raw >>= 2;
start = (raw >> 13) & 0x01;
tgl = (raw >> 11) & 0x01;
addr = (raw >> 6) & 0x1f;
cmd = raw & 0x3f;
/*
* 12th bit is used to extend the command in extended RC5 and has
* no effect on standard RC5.
*/
cmd += ((raw >> 12) & 0x01) ? 0 : 0x40;
if (!start)
return -EINVAL;
request->protocol = RC_PROTO_RC5;
request->scancode = addr << 8 | cmd;
request->toggle = tgl;
return IMG_IR_SCANCODE;
}
/* Convert RC5 scancode to RC5 data filter */
static int img_ir_rc5_filter(const struct rc_scancode_filter *in,
struct img_ir_filter *out, u64 protocols)
{
/* Not supported by the hw. */
return -EINVAL;
}
/*
* RC-5 decoder
* see http://www.sbprojects.com/knowledge/ir/rc5.php
*/
struct img_ir_decoder img_ir_rc5 = {
.type = RC_PROTO_BIT_RC5,
.control = {
.bitoriend2 = 1,
.code_type = IMG_IR_CODETYPE_BIPHASE,
.decodend2 = 1,
},
/* main timings */
.tolerance = 16,
.unit = 888888, /* 32 periods of 36 kHz = 888.888 microseconds */
.timings = {
/* 10 symbol */
.s10 = {
.pulse = { 1 },
.space = { 1 },
},
/* 11 symbol */
.s11 = {
.pulse = { 1 },
.space = { 1 },
},
/* free time */
.ft = {
.minlen = 14,
.maxlen = 14,
.ft_min = 5,
},
},
/* scancode logic */
.scancode = img_ir_rc5_scancode,
.filter = img_ir_rc5_filter,
};
| linux-master | drivers/media/rc/img-ir/img-ir-rc5.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* cec-adap.c - HDMI Consumer Electronics Control framework - CEC adapter
*
* Copyright 2016 Cisco Systems, Inc. and/or its affiliates. All rights reserved.
*/
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/kmod.h>
#include <linux/ktime.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/types.h>
#include <drm/drm_connector.h>
#include <drm/drm_device.h>
#include <drm/drm_edid.h>
#include <drm/drm_file.h>
#include "cec-priv.h"
static void cec_fill_msg_report_features(struct cec_adapter *adap,
struct cec_msg *msg,
unsigned int la_idx);
static int cec_log_addr2idx(const struct cec_adapter *adap, u8 log_addr)
{
int i;
for (i = 0; i < adap->log_addrs.num_log_addrs; i++)
if (adap->log_addrs.log_addr[i] == log_addr)
return i;
return -1;
}
static unsigned int cec_log_addr2dev(const struct cec_adapter *adap, u8 log_addr)
{
int i = cec_log_addr2idx(adap, log_addr);
return adap->log_addrs.primary_device_type[i < 0 ? 0 : i];
}
u16 cec_get_edid_phys_addr(const u8 *edid, unsigned int size,
unsigned int *offset)
{
unsigned int loc = cec_get_edid_spa_location(edid, size);
if (offset)
*offset = loc;
if (loc == 0)
return CEC_PHYS_ADDR_INVALID;
return (edid[loc] << 8) | edid[loc + 1];
}
EXPORT_SYMBOL_GPL(cec_get_edid_phys_addr);
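/*
* Worked example (illustrative, byte values chosen for exposition): if the
* two EDID bytes at the source physical address location are 0x12 and
* 0x30, this returns 0x1230, i.e. the CEC physical address 1.2.3.0
* (four 4-bit fields).
*/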
void cec_fill_conn_info_from_drm(struct cec_connector_info *conn_info,
const struct drm_connector *connector)
{
memset(conn_info, 0, sizeof(*conn_info));
conn_info->type = CEC_CONNECTOR_TYPE_DRM;
conn_info->drm.card_no = connector->dev->primary->index;
conn_info->drm.connector_id = connector->base.id;
}
EXPORT_SYMBOL_GPL(cec_fill_conn_info_from_drm);
/*
* Queue a new event for this filehandle. If ts == 0, then set it
* to the current time.
*
* We keep a queue of at most max_event events where max_event differs
* per event. If the queue becomes full, then drop the oldest event and
* keep track of how many events we've dropped.
*/
void cec_queue_event_fh(struct cec_fh *fh,
const struct cec_event *new_ev, u64 ts)
{
static const u16 max_events[CEC_NUM_EVENTS] = {
1, 1, 800, 800, 8, 8, 8, 8
};
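/*
* Clarifying note (assumes the standard CEC_EVENT_* numbering): the
* entries above map, in order, to STATE_CHANGE and LOST_MSGS (1 each),
* the PIN_CEC low/high events (800 each) and the PIN_HPD/PIN_5V
* low/high events (8 each).
*/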
struct cec_event_entry *entry;
unsigned int ev_idx = new_ev->event - 1;
if (WARN_ON(ev_idx >= ARRAY_SIZE(fh->events)))
return;
if (ts == 0)
ts = ktime_get_ns();
mutex_lock(&fh->lock);
if (ev_idx < CEC_NUM_CORE_EVENTS)
entry = &fh->core_events[ev_idx];
else
entry = kmalloc(sizeof(*entry), GFP_KERNEL);
if (entry) {
if (new_ev->event == CEC_EVENT_LOST_MSGS &&
fh->queued_events[ev_idx]) {
entry->ev.lost_msgs.lost_msgs +=
new_ev->lost_msgs.lost_msgs;
goto unlock;
}
entry->ev = *new_ev;
entry->ev.ts = ts;
if (fh->queued_events[ev_idx] < max_events[ev_idx]) {
/* Add new msg at the end of the queue */
list_add_tail(&entry->list, &fh->events[ev_idx]);
fh->queued_events[ev_idx]++;
fh->total_queued_events++;
goto unlock;
}
if (ev_idx >= CEC_NUM_CORE_EVENTS) {
list_add_tail(&entry->list, &fh->events[ev_idx]);
/* drop the oldest event */
entry = list_first_entry(&fh->events[ev_idx],
struct cec_event_entry, list);
list_del(&entry->list);
kfree(entry);
}
}
/* Mark that events were lost */
entry = list_first_entry_or_null(&fh->events[ev_idx],
struct cec_event_entry, list);
if (entry)
entry->ev.flags |= CEC_EVENT_FL_DROPPED_EVENTS;
unlock:
mutex_unlock(&fh->lock);
wake_up_interruptible(&fh->wait);
}
/* Queue a new event for all open filehandles. */
static void cec_queue_event(struct cec_adapter *adap,
const struct cec_event *ev)
{
u64 ts = ktime_get_ns();
struct cec_fh *fh;
mutex_lock(&adap->devnode.lock_fhs);
list_for_each_entry(fh, &adap->devnode.fhs, list)
cec_queue_event_fh(fh, ev, ts);
mutex_unlock(&adap->devnode.lock_fhs);
}
/* Notify userspace that the CEC pin changed state at the given time. */
void cec_queue_pin_cec_event(struct cec_adapter *adap, bool is_high,
bool dropped_events, ktime_t ts)
{
struct cec_event ev = {
.event = is_high ? CEC_EVENT_PIN_CEC_HIGH :
CEC_EVENT_PIN_CEC_LOW,
.flags = dropped_events ? CEC_EVENT_FL_DROPPED_EVENTS : 0,
};
struct cec_fh *fh;
mutex_lock(&adap->devnode.lock_fhs);
list_for_each_entry(fh, &adap->devnode.fhs, list) {
if (fh->mode_follower == CEC_MODE_MONITOR_PIN)
cec_queue_event_fh(fh, &ev, ktime_to_ns(ts));
}
mutex_unlock(&adap->devnode.lock_fhs);
}
EXPORT_SYMBOL_GPL(cec_queue_pin_cec_event);
/* Notify userspace that the HPD pin changed state at the given time. */
void cec_queue_pin_hpd_event(struct cec_adapter *adap, bool is_high, ktime_t ts)
{
struct cec_event ev = {
.event = is_high ? CEC_EVENT_PIN_HPD_HIGH :
CEC_EVENT_PIN_HPD_LOW,
};
struct cec_fh *fh;
mutex_lock(&adap->devnode.lock_fhs);
list_for_each_entry(fh, &adap->devnode.fhs, list)
cec_queue_event_fh(fh, &ev, ktime_to_ns(ts));
mutex_unlock(&adap->devnode.lock_fhs);
}
EXPORT_SYMBOL_GPL(cec_queue_pin_hpd_event);
/* Notify userspace that the 5V pin changed state at the given time. */
void cec_queue_pin_5v_event(struct cec_adapter *adap, bool is_high, ktime_t ts)
{
struct cec_event ev = {
.event = is_high ? CEC_EVENT_PIN_5V_HIGH :
CEC_EVENT_PIN_5V_LOW,
};
struct cec_fh *fh;
mutex_lock(&adap->devnode.lock_fhs);
list_for_each_entry(fh, &adap->devnode.fhs, list)
cec_queue_event_fh(fh, &ev, ktime_to_ns(ts));
mutex_unlock(&adap->devnode.lock_fhs);
}
EXPORT_SYMBOL_GPL(cec_queue_pin_5v_event);
/*
* Queue a new message for this filehandle.
*
* We keep a queue of at most CEC_MAX_MSG_RX_QUEUE_SZ messages. If the
* queue becomes full, then drop the oldest message and keep track
* of how many messages we've dropped.
*/
static void cec_queue_msg_fh(struct cec_fh *fh, const struct cec_msg *msg)
{
static const struct cec_event ev_lost_msgs = {
.event = CEC_EVENT_LOST_MSGS,
.flags = 0,
{
.lost_msgs = { 1 },
},
};
struct cec_msg_entry *entry;
mutex_lock(&fh->lock);
entry = kmalloc(sizeof(*entry), GFP_KERNEL);
if (entry) {
entry->msg = *msg;
/* Add new msg at the end of the queue */
list_add_tail(&entry->list, &fh->msgs);
if (fh->queued_msgs < CEC_MAX_MSG_RX_QUEUE_SZ) {
/* All is fine if there is enough room */
fh->queued_msgs++;
mutex_unlock(&fh->lock);
wake_up_interruptible(&fh->wait);
return;
}
/*
* if the message queue is full, then drop the oldest one and
* send a lost message event.
*/
entry = list_first_entry(&fh->msgs, struct cec_msg_entry, list);
list_del(&entry->list);
kfree(entry);
}
mutex_unlock(&fh->lock);
/*
* We lost a message, either because kmalloc failed or the queue
* was full.
*/
cec_queue_event_fh(fh, &ev_lost_msgs, ktime_get_ns());
}
/*
* Queue the message for those filehandles that are in monitor mode.
* If valid_la is true (this message is for us or was sent by us),
* then pass it on to any monitoring filehandle. If this message
* isn't for us or from us, then only give it to filehandles that
* are in MONITOR_ALL mode.
*
* This can only happen if the CEC_CAP_MONITOR_ALL capability is
* set and the CEC adapter was placed in 'monitor all' mode.
*/
static void cec_queue_msg_monitor(struct cec_adapter *adap,
const struct cec_msg *msg,
bool valid_la)
{
struct cec_fh *fh;
u32 monitor_mode = valid_la ? CEC_MODE_MONITOR :
CEC_MODE_MONITOR_ALL;
mutex_lock(&adap->devnode.lock_fhs);
list_for_each_entry(fh, &adap->devnode.fhs, list) {
if (fh->mode_follower >= monitor_mode)
cec_queue_msg_fh(fh, msg);
}
mutex_unlock(&adap->devnode.lock_fhs);
}
/*
* Queue the message for follower filehandles.
*/
static void cec_queue_msg_followers(struct cec_adapter *adap,
const struct cec_msg *msg)
{
struct cec_fh *fh;
mutex_lock(&adap->devnode.lock_fhs);
list_for_each_entry(fh, &adap->devnode.fhs, list) {
if (fh->mode_follower == CEC_MODE_FOLLOWER)
cec_queue_msg_fh(fh, msg);
}
mutex_unlock(&adap->devnode.lock_fhs);
}
/* Notify userspace of an adapter state change. */
static void cec_post_state_event(struct cec_adapter *adap)
{
struct cec_event ev = {
.event = CEC_EVENT_STATE_CHANGE,
};
ev.state_change.phys_addr = adap->phys_addr;
ev.state_change.log_addr_mask = adap->log_addrs.log_addr_mask;
ev.state_change.have_conn_info =
adap->conn_info.type != CEC_CONNECTOR_TYPE_NO_CONNECTOR;
cec_queue_event(adap, &ev);
}
/*
* A CEC transmit (and a possible wait for reply) completed.
* If this was in blocking mode, then complete it, otherwise
* queue the message for userspace to dequeue later.
*
* This function is called with adap->lock held.
*/
static void cec_data_completed(struct cec_data *data)
{
/*
* Delete this transmit from the filehandle's xfer_list since
* we're done with it.
*
* Note that if the filehandle is closed before this transmit
* finished, then the release() function will set data->fh to NULL.
* Without that we would be referring to a closed filehandle.
*/
if (data->fh)
list_del_init(&data->xfer_list);
if (data->blocking) {
/*
* Someone is blocking so mark the message as completed
* and call complete.
*/
data->completed = true;
complete(&data->c);
} else {
/*
* No blocking, so just queue the message if needed and
* free the memory.
*/
if (data->fh)
cec_queue_msg_fh(data->fh, &data->msg);
kfree(data);
}
}
/*
* A pending CEC transmit needs to be cancelled, either because the CEC
* adapter is disabled or the transmit takes an impossibly long time to
* finish, or the reply timed out.
*
* This function is called with adap->lock held.
*/
static void cec_data_cancel(struct cec_data *data, u8 tx_status, u8 rx_status)
{
struct cec_adapter *adap = data->adap;
/*
* It's either the current transmit, or it is a pending
* transmit. Take the appropriate action to clear it.
*/
if (adap->transmitting == data) {
adap->transmitting = NULL;
} else {
list_del_init(&data->list);
if (!(data->msg.tx_status & CEC_TX_STATUS_OK))
if (!WARN_ON(!adap->transmit_queue_sz))
adap->transmit_queue_sz--;
}
if (data->msg.tx_status & CEC_TX_STATUS_OK) {
data->msg.rx_ts = ktime_get_ns();
data->msg.rx_status = rx_status;
if (!data->blocking)
data->msg.tx_status = 0;
} else {
data->msg.tx_ts = ktime_get_ns();
data->msg.tx_status |= tx_status |
CEC_TX_STATUS_MAX_RETRIES;
data->msg.tx_error_cnt++;
data->attempts = 0;
if (!data->blocking)
data->msg.rx_status = 0;
}
/* Queue transmitted message for monitoring purposes */
cec_queue_msg_monitor(adap, &data->msg, 1);
if (!data->blocking && data->msg.sequence)
/* Allow drivers to react to a canceled transmit */
call_void_op(adap, adap_nb_transmit_canceled, &data->msg);
cec_data_completed(data);
}
/*
* Flush all pending transmits and cancel any pending timeout work.
*
* This function is called with adap->lock held.
*/
static void cec_flush(struct cec_adapter *adap)
{
struct cec_data *data, *n;
/*
* If the adapter is disabled, or we're asked to stop,
* then cancel any pending transmits.
*/
while (!list_empty(&adap->transmit_queue)) {
data = list_first_entry(&adap->transmit_queue,
struct cec_data, list);
cec_data_cancel(data, CEC_TX_STATUS_ABORTED, 0);
}
if (adap->transmitting)
adap->transmit_in_progress_aborted = true;
/* Cancel the pending timeout work. */
list_for_each_entry_safe(data, n, &adap->wait_queue, list) {
if (cancel_delayed_work(&data->work))
cec_data_cancel(data, CEC_TX_STATUS_OK, CEC_RX_STATUS_ABORTED);
/*
* If cancel_delayed_work returned false, then
* the cec_wait_timeout function is running,
* which will call cec_data_completed. So no
* need to do anything special in that case.
*/
}
/*
* If something went wrong and this counter isn't what it should
* be, then this will reset it back to 0. Warn if it is not 0,
* since it indicates a bug, either in this framework or in a
* CEC driver.
*/
if (WARN_ON(adap->transmit_queue_sz))
adap->transmit_queue_sz = 0;
}
/*
* Main CEC state machine
*
* Wait until the thread should be stopped, or we are not transmitting and
* a new transmit message is queued up, in which case we start transmitting
* that message. When the adapter finished transmitting the message it will
* call cec_transmit_done().
*
* If the adapter is disabled, then remove all queued messages instead.
*
* If the current transmit times out, then cancel that transmit.
*/
int cec_thread_func(void *_adap)
{
struct cec_adapter *adap = _adap;
for (;;) {
unsigned int signal_free_time;
struct cec_data *data;
bool timeout = false;
u8 attempts;
if (adap->transmit_in_progress) {
int err;
/*
* We are transmitting a message, so add a timeout to prevent
* the state machine from getting stuck waiting for this
* message to finish, and add a check to see if the adapter
* is disabled, in which case the transmit should be canceled.
*/
err = wait_event_interruptible_timeout(adap->kthread_waitq,
(adap->needs_hpd &&
(!adap->is_configured && !adap->is_configuring)) ||
kthread_should_stop() ||
(!adap->transmit_in_progress &&
!list_empty(&adap->transmit_queue)),
msecs_to_jiffies(adap->xfer_timeout_ms));
timeout = err == 0;
} else {
/* Otherwise we just wait for something to happen. */
wait_event_interruptible(adap->kthread_waitq,
kthread_should_stop() ||
(!adap->transmit_in_progress &&
!list_empty(&adap->transmit_queue)));
}
mutex_lock(&adap->lock);
if ((adap->needs_hpd &&
(!adap->is_configured && !adap->is_configuring)) ||
kthread_should_stop()) {
cec_flush(adap);
goto unlock;
}
if (adap->transmit_in_progress && timeout) {
/*
* If we timeout, then log that. Normally this does
* not happen and it is an indication of a faulty CEC
* adapter driver, or the CEC bus is in some weird
* state. On rare occasions it can happen if there is
* so much traffic on the bus that the adapter was
* unable to transmit for xfer_timeout_ms (2.1s by
* default).
*/
if (adap->transmitting) {
pr_warn("cec-%s: message %*ph timed out\n", adap->name,
adap->transmitting->msg.len,
adap->transmitting->msg.msg);
/* Just give up on this. */
cec_data_cancel(adap->transmitting,
CEC_TX_STATUS_TIMEOUT, 0);
} else {
pr_warn("cec-%s: transmit timed out\n", adap->name);
}
adap->transmit_in_progress = false;
adap->tx_timeouts++;
goto unlock;
}
/*
* If we are still transmitting, or there is nothing new to
* transmit, then just continue waiting.
*/
if (adap->transmit_in_progress || list_empty(&adap->transmit_queue))
goto unlock;
/* Get a new message to transmit */
data = list_first_entry(&adap->transmit_queue,
struct cec_data, list);
list_del_init(&data->list);
if (!WARN_ON(!data->adap->transmit_queue_sz))
adap->transmit_queue_sz--;
/* Make this the current transmitting message */
adap->transmitting = data;
/*
* Suggested number of attempts as per the CEC 2.0 spec:
* 4 attempts is the default, except for 'secondary poll
* messages', i.e. poll messages not sent during the adapter
* configuration phase when it allocates logical addresses.
*/
if (data->msg.len == 1 && adap->is_configured)
attempts = 2;
else
attempts = 4;
/* Set the suggested signal free time */
if (data->attempts) {
/* should be >= 3 data bit periods for a retry */
signal_free_time = CEC_SIGNAL_FREE_TIME_RETRY;
} else if (adap->last_initiator !=
cec_msg_initiator(&data->msg)) {
/* should be >= 5 data bit periods for new initiator */
signal_free_time = CEC_SIGNAL_FREE_TIME_NEW_INITIATOR;
adap->last_initiator = cec_msg_initiator(&data->msg);
} else {
/*
* should be >= 7 data bit periods for sending another
* frame immediately after another.
*/
signal_free_time = CEC_SIGNAL_FREE_TIME_NEXT_XFER;
}
if (data->attempts == 0)
data->attempts = attempts;
adap->transmit_in_progress_aborted = false;
/* Tell the adapter to transmit, cancel on error */
if (call_op(adap, adap_transmit, data->attempts,
signal_free_time, &data->msg))
cec_data_cancel(data, CEC_TX_STATUS_ABORTED, 0);
else
adap->transmit_in_progress = true;
unlock:
mutex_unlock(&adap->lock);
if (kthread_should_stop())
break;
}
return 0;
}
/*
* Called by the CEC adapter if a transmit finished.
*/
void cec_transmit_done_ts(struct cec_adapter *adap, u8 status,
u8 arb_lost_cnt, u8 nack_cnt, u8 low_drive_cnt,
u8 error_cnt, ktime_t ts)
{
struct cec_data *data;
struct cec_msg *msg;
unsigned int attempts_made = arb_lost_cnt + nack_cnt +
low_drive_cnt + error_cnt;
bool done = status & (CEC_TX_STATUS_MAX_RETRIES | CEC_TX_STATUS_OK);
bool aborted = adap->transmit_in_progress_aborted;
dprintk(2, "%s: status 0x%02x\n", __func__, status);
if (attempts_made < 1)
attempts_made = 1;
mutex_lock(&adap->lock);
data = adap->transmitting;
if (!data) {
/*
* This might happen if a transmit was issued and the cable is
* unplugged while the transmit is ongoing. Ignore this
* transmit in that case.
*/
if (!adap->transmit_in_progress)
dprintk(1, "%s was called without an ongoing transmit!\n",
__func__);
adap->transmit_in_progress = false;
goto wake_thread;
}
adap->transmit_in_progress = false;
adap->transmit_in_progress_aborted = false;
msg = &data->msg;
/* Drivers must fill in the status! */
WARN_ON(status == 0);
msg->tx_ts = ktime_to_ns(ts);
msg->tx_status |= status;
msg->tx_arb_lost_cnt += arb_lost_cnt;
msg->tx_nack_cnt += nack_cnt;
msg->tx_low_drive_cnt += low_drive_cnt;
msg->tx_error_cnt += error_cnt;
/* Mark that we're done with this transmit */
adap->transmitting = NULL;
/*
* If there are still retry attempts left and there was an error and
* the hardware didn't signal that it retried itself (by setting
* CEC_TX_STATUS_MAX_RETRIES), then we will retry ourselves.
*/
if (!aborted && data->attempts > attempts_made && !done) {
/* Retry this message */
data->attempts -= attempts_made;
if (msg->timeout)
dprintk(2, "retransmit: %*ph (attempts: %d, wait for 0x%02x)\n",
msg->len, msg->msg, data->attempts, msg->reply);
else
dprintk(2, "retransmit: %*ph (attempts: %d)\n",
msg->len, msg->msg, data->attempts);
/* Add the message in front of the transmit queue */
list_add(&data->list, &adap->transmit_queue);
adap->transmit_queue_sz++;
goto wake_thread;
}
if (aborted && !done)
status |= CEC_TX_STATUS_ABORTED;
data->attempts = 0;
/* Always set CEC_TX_STATUS_MAX_RETRIES on error */
if (!(status & CEC_TX_STATUS_OK))
msg->tx_status |= CEC_TX_STATUS_MAX_RETRIES;
/* Queue transmitted message for monitoring purposes */
cec_queue_msg_monitor(adap, msg, 1);
if ((status & CEC_TX_STATUS_OK) && adap->is_configured &&
msg->timeout) {
/*
* Queue the message into the wait queue if we want to wait
* for a reply.
*/
list_add_tail(&data->list, &adap->wait_queue);
schedule_delayed_work(&data->work,
msecs_to_jiffies(msg->timeout));
} else {
/* Otherwise we're done */
cec_data_completed(data);
}
wake_thread:
/*
* Wake up the main thread to see if another message is ready
* for transmitting or to retry the current message.
*/
wake_up_interruptible(&adap->kthread_waitq);
mutex_unlock(&adap->lock);
}
EXPORT_SYMBOL_GPL(cec_transmit_done_ts);
void cec_transmit_attempt_done_ts(struct cec_adapter *adap,
u8 status, ktime_t ts)
{
switch (status & ~CEC_TX_STATUS_MAX_RETRIES) {
case CEC_TX_STATUS_OK:
cec_transmit_done_ts(adap, status, 0, 0, 0, 0, ts);
return;
case CEC_TX_STATUS_ARB_LOST:
cec_transmit_done_ts(adap, status, 1, 0, 0, 0, ts);
return;
case CEC_TX_STATUS_NACK:
cec_transmit_done_ts(adap, status, 0, 1, 0, 0, ts);
return;
case CEC_TX_STATUS_LOW_DRIVE:
cec_transmit_done_ts(adap, status, 0, 0, 1, 0, ts);
return;
case CEC_TX_STATUS_ERROR:
cec_transmit_done_ts(adap, status, 0, 0, 0, 1, ts);
return;
default:
/* Should never happen */
WARN(1, "cec-%s: invalid status 0x%02x\n", adap->name, status);
return;
}
}
EXPORT_SYMBOL_GPL(cec_transmit_attempt_done_ts);
/*
* Called when waiting for a reply times out.
*/
static void cec_wait_timeout(struct work_struct *work)
{
struct cec_data *data = container_of(work, struct cec_data, work.work);
struct cec_adapter *adap = data->adap;
mutex_lock(&adap->lock);
/*
* Sanity check in case the timeout and the arrival of the message
* happened at the same time.
*/
if (list_empty(&data->list))
goto unlock;
/* Mark the message as timed out */
list_del_init(&data->list);
cec_data_cancel(data, CEC_TX_STATUS_OK, CEC_RX_STATUS_TIMEOUT);
unlock:
mutex_unlock(&adap->lock);
}
/*
* Transmit a message. The fh argument may be NULL if the transmit is not
* associated with a specific filehandle.
*
* This function is called with adap->lock held.
*/
int cec_transmit_msg_fh(struct cec_adapter *adap, struct cec_msg *msg,
struct cec_fh *fh, bool block)
{
struct cec_data *data;
bool is_raw = msg_is_raw(msg);
if (adap->devnode.unregistered)
return -ENODEV;
msg->rx_ts = 0;
msg->tx_ts = 0;
msg->rx_status = 0;
msg->tx_status = 0;
msg->tx_arb_lost_cnt = 0;
msg->tx_nack_cnt = 0;
msg->tx_low_drive_cnt = 0;
msg->tx_error_cnt = 0;
msg->sequence = 0;
if (msg->reply && msg->timeout == 0) {
/* Make sure the timeout isn't 0. */
msg->timeout = 1000;
}
msg->flags &= CEC_MSG_FL_REPLY_TO_FOLLOWERS | CEC_MSG_FL_RAW;
if (!msg->timeout)
msg->flags &= ~CEC_MSG_FL_REPLY_TO_FOLLOWERS;
/* Sanity checks */
if (msg->len == 0 || msg->len > CEC_MAX_MSG_SIZE) {
dprintk(1, "%s: invalid length %d\n", __func__, msg->len);
return -EINVAL;
}
memset(msg->msg + msg->len, 0, sizeof(msg->msg) - msg->len);
if (msg->timeout)
dprintk(2, "%s: %*ph (wait for 0x%02x%s)\n",
__func__, msg->len, msg->msg, msg->reply,
!block ? ", nb" : "");
else
dprintk(2, "%s: %*ph%s\n",
__func__, msg->len, msg->msg, !block ? " (nb)" : "");
if (msg->timeout && msg->len == 1) {
dprintk(1, "%s: can't reply to poll msg\n", __func__);
return -EINVAL;
}
if (is_raw) {
if (!capable(CAP_SYS_RAWIO))
return -EPERM;
} else {
/* A CDC-Only device can only send CDC messages */
if ((adap->log_addrs.flags & CEC_LOG_ADDRS_FL_CDC_ONLY) &&
(msg->len == 1 || msg->msg[1] != CEC_MSG_CDC_MESSAGE)) {
dprintk(1, "%s: not a CDC message\n", __func__);
return -EINVAL;
}
if (msg->len >= 4 && msg->msg[1] == CEC_MSG_CDC_MESSAGE) {
msg->msg[2] = adap->phys_addr >> 8;
msg->msg[3] = adap->phys_addr & 0xff;
}
if (msg->len == 1) {
if (cec_msg_destination(msg) == 0xf) {
dprintk(1, "%s: invalid poll message\n",
__func__);
return -EINVAL;
}
if (cec_has_log_addr(adap, cec_msg_destination(msg))) {
/*
* If the destination is a logical address our
* adapter has already claimed, then just NACK
* this. What the hardware does with a POLL to itself
* depends on the implementation (some acknowledge it), so
* it is just as easy to handle it here so that the
* behavior stays consistent.
*/
msg->tx_ts = ktime_get_ns();
msg->tx_status = CEC_TX_STATUS_NACK |
CEC_TX_STATUS_MAX_RETRIES;
msg->tx_nack_cnt = 1;
msg->sequence = ++adap->sequence;
if (!msg->sequence)
msg->sequence = ++adap->sequence;
return 0;
}
}
if (msg->len > 1 && !cec_msg_is_broadcast(msg) &&
cec_has_log_addr(adap, cec_msg_destination(msg))) {
dprintk(1, "%s: destination is the adapter itself\n",
__func__);
return -EINVAL;
}
if (msg->len > 1 && adap->is_configured &&
!cec_has_log_addr(adap, cec_msg_initiator(msg))) {
dprintk(1, "%s: initiator has unknown logical address %d\n",
__func__, cec_msg_initiator(msg));
return -EINVAL;
}
/*
* Special case: allow Ping and IMAGE/TEXT_VIEW_ON to be
* transmitted to a TV, even if the adapter is unconfigured.
* This makes it possible to detect or wake up displays that
* pull down the HPD when in standby.
*/
if (!adap->is_configured && !adap->is_configuring &&
(msg->len > 2 ||
cec_msg_destination(msg) != CEC_LOG_ADDR_TV ||
(msg->len == 2 && msg->msg[1] != CEC_MSG_IMAGE_VIEW_ON &&
msg->msg[1] != CEC_MSG_TEXT_VIEW_ON))) {
dprintk(1, "%s: adapter is unconfigured\n", __func__);
return -ENONET;
}
}
if (!adap->is_configured && !adap->is_configuring) {
if (adap->needs_hpd) {
dprintk(1, "%s: adapter is unconfigured and needs HPD\n",
__func__);
return -ENONET;
}
if (msg->reply) {
dprintk(1, "%s: invalid msg->reply\n", __func__);
return -EINVAL;
}
}
if (adap->transmit_queue_sz >= CEC_MAX_MSG_TX_QUEUE_SZ) {
dprintk(2, "%s: transmit queue full\n", __func__);
return -EBUSY;
}
data = kzalloc(sizeof(*data), GFP_KERNEL);
if (!data)
return -ENOMEM;
msg->sequence = ++adap->sequence;
if (!msg->sequence)
msg->sequence = ++adap->sequence;
data->msg = *msg;
data->fh = fh;
data->adap = adap;
data->blocking = block;
init_completion(&data->c);
INIT_DELAYED_WORK(&data->work, cec_wait_timeout);
if (fh)
list_add_tail(&data->xfer_list, &fh->xfer_list);
else
INIT_LIST_HEAD(&data->xfer_list);
list_add_tail(&data->list, &adap->transmit_queue);
adap->transmit_queue_sz++;
if (!adap->transmitting)
wake_up_interruptible(&adap->kthread_waitq);
/* All done if we don't need to block waiting for completion */
if (!block)
return 0;
/*
* Release the lock and wait, retake the lock afterwards.
*/
mutex_unlock(&adap->lock);
wait_for_completion_killable(&data->c);
if (!data->completed)
cancel_delayed_work_sync(&data->work);
mutex_lock(&adap->lock);
/* Cancel the transmit if it was interrupted */
if (!data->completed) {
if (data->msg.tx_status & CEC_TX_STATUS_OK)
cec_data_cancel(data, CEC_TX_STATUS_OK, CEC_RX_STATUS_ABORTED);
else
cec_data_cancel(data, CEC_TX_STATUS_ABORTED, 0);
}
/* The transmit completed (possibly with an error) */
*msg = data->msg;
if (WARN_ON(!list_empty(&data->list)))
list_del(&data->list);
if (WARN_ON(!list_empty(&data->xfer_list)))
list_del(&data->xfer_list);
kfree(data);
return 0;
}
/* Helper function to be used by drivers and this framework. */
int cec_transmit_msg(struct cec_adapter *adap, struct cec_msg *msg,
bool block)
{
int ret;
mutex_lock(&adap->lock);
ret = cec_transmit_msg_fh(adap, msg, NULL, block);
mutex_unlock(&adap->lock);
return ret;
}
EXPORT_SYMBOL_GPL(cec_transmit_msg);
/*
* I don't like forward references but without this the low-level
* cec_received_msg_ts() function would come after a bunch of high-level
* CEC protocol handling functions. That was very confusing.
*/
static int cec_receive_notify(struct cec_adapter *adap, struct cec_msg *msg,
bool is_reply);
#define DIRECTED 0x80
#define BCAST1_4 0x40
#define BCAST2_0 0x20 /* broadcast only allowed for >= 2.0 */
#define BCAST (BCAST1_4 | BCAST2_0)
#define BOTH (BCAST | DIRECTED)
/*
* Specify minimum length and whether the message is directed, broadcast
* or both. Messages that do not match the criteria are ignored as per
* the CEC specification.
*/
static const u8 cec_msg_size[256] = {
[CEC_MSG_ACTIVE_SOURCE] = 4 | BCAST,
[CEC_MSG_IMAGE_VIEW_ON] = 2 | DIRECTED,
[CEC_MSG_TEXT_VIEW_ON] = 2 | DIRECTED,
[CEC_MSG_INACTIVE_SOURCE] = 4 | DIRECTED,
[CEC_MSG_REQUEST_ACTIVE_SOURCE] = 2 | BCAST,
[CEC_MSG_ROUTING_CHANGE] = 6 | BCAST,
[CEC_MSG_ROUTING_INFORMATION] = 4 | BCAST,
[CEC_MSG_SET_STREAM_PATH] = 4 | BCAST,
[CEC_MSG_STANDBY] = 2 | BOTH,
[CEC_MSG_RECORD_OFF] = 2 | DIRECTED,
[CEC_MSG_RECORD_ON] = 3 | DIRECTED,
[CEC_MSG_RECORD_STATUS] = 3 | DIRECTED,
[CEC_MSG_RECORD_TV_SCREEN] = 2 | DIRECTED,
[CEC_MSG_CLEAR_ANALOGUE_TIMER] = 13 | DIRECTED,
[CEC_MSG_CLEAR_DIGITAL_TIMER] = 16 | DIRECTED,
[CEC_MSG_CLEAR_EXT_TIMER] = 13 | DIRECTED,
[CEC_MSG_SET_ANALOGUE_TIMER] = 13 | DIRECTED,
[CEC_MSG_SET_DIGITAL_TIMER] = 16 | DIRECTED,
[CEC_MSG_SET_EXT_TIMER] = 13 | DIRECTED,
[CEC_MSG_SET_TIMER_PROGRAM_TITLE] = 2 | DIRECTED,
[CEC_MSG_TIMER_CLEARED_STATUS] = 3 | DIRECTED,
[CEC_MSG_TIMER_STATUS] = 3 | DIRECTED,
[CEC_MSG_CEC_VERSION] = 3 | DIRECTED,
[CEC_MSG_GET_CEC_VERSION] = 2 | DIRECTED,
[CEC_MSG_GIVE_PHYSICAL_ADDR] = 2 | DIRECTED,
[CEC_MSG_GET_MENU_LANGUAGE] = 2 | DIRECTED,
[CEC_MSG_REPORT_PHYSICAL_ADDR] = 5 | BCAST,
[CEC_MSG_SET_MENU_LANGUAGE] = 5 | BCAST,
[CEC_MSG_REPORT_FEATURES] = 6 | BCAST,
[CEC_MSG_GIVE_FEATURES] = 2 | DIRECTED,
[CEC_MSG_DECK_CONTROL] = 3 | DIRECTED,
[CEC_MSG_DECK_STATUS] = 3 | DIRECTED,
[CEC_MSG_GIVE_DECK_STATUS] = 3 | DIRECTED,
[CEC_MSG_PLAY] = 3 | DIRECTED,
[CEC_MSG_GIVE_TUNER_DEVICE_STATUS] = 3 | DIRECTED,
[CEC_MSG_SELECT_ANALOGUE_SERVICE] = 6 | DIRECTED,
[CEC_MSG_SELECT_DIGITAL_SERVICE] = 9 | DIRECTED,
[CEC_MSG_TUNER_DEVICE_STATUS] = 7 | DIRECTED,
[CEC_MSG_TUNER_STEP_DECREMENT] = 2 | DIRECTED,
[CEC_MSG_TUNER_STEP_INCREMENT] = 2 | DIRECTED,
[CEC_MSG_DEVICE_VENDOR_ID] = 5 | BCAST,
[CEC_MSG_GIVE_DEVICE_VENDOR_ID] = 2 | DIRECTED,
[CEC_MSG_VENDOR_COMMAND] = 2 | DIRECTED,
[CEC_MSG_VENDOR_COMMAND_WITH_ID] = 5 | BOTH,
[CEC_MSG_VENDOR_REMOTE_BUTTON_DOWN] = 2 | BOTH,
[CEC_MSG_VENDOR_REMOTE_BUTTON_UP] = 2 | BOTH,
[CEC_MSG_SET_OSD_STRING] = 3 | DIRECTED,
[CEC_MSG_GIVE_OSD_NAME] = 2 | DIRECTED,
[CEC_MSG_SET_OSD_NAME] = 2 | DIRECTED,
[CEC_MSG_MENU_REQUEST] = 3 | DIRECTED,
[CEC_MSG_MENU_STATUS] = 3 | DIRECTED,
[CEC_MSG_USER_CONTROL_PRESSED] = 3 | DIRECTED,
[CEC_MSG_USER_CONTROL_RELEASED] = 2 | DIRECTED,
[CEC_MSG_GIVE_DEVICE_POWER_STATUS] = 2 | DIRECTED,
[CEC_MSG_REPORT_POWER_STATUS] = 3 | DIRECTED | BCAST2_0,
[CEC_MSG_FEATURE_ABORT] = 4 | DIRECTED,
[CEC_MSG_ABORT] = 2 | DIRECTED,
[CEC_MSG_GIVE_AUDIO_STATUS] = 2 | DIRECTED,
[CEC_MSG_GIVE_SYSTEM_AUDIO_MODE_STATUS] = 2 | DIRECTED,
[CEC_MSG_REPORT_AUDIO_STATUS] = 3 | DIRECTED,
[CEC_MSG_REPORT_SHORT_AUDIO_DESCRIPTOR] = 2 | DIRECTED,
[CEC_MSG_REQUEST_SHORT_AUDIO_DESCRIPTOR] = 2 | DIRECTED,
[CEC_MSG_SET_SYSTEM_AUDIO_MODE] = 3 | BOTH,
[CEC_MSG_SET_AUDIO_VOLUME_LEVEL] = 3 | DIRECTED,
[CEC_MSG_SYSTEM_AUDIO_MODE_REQUEST] = 2 | DIRECTED,
[CEC_MSG_SYSTEM_AUDIO_MODE_STATUS] = 3 | DIRECTED,
[CEC_MSG_SET_AUDIO_RATE] = 3 | DIRECTED,
[CEC_MSG_INITIATE_ARC] = 2 | DIRECTED,
[CEC_MSG_REPORT_ARC_INITIATED] = 2 | DIRECTED,
[CEC_MSG_REPORT_ARC_TERMINATED] = 2 | DIRECTED,
[CEC_MSG_REQUEST_ARC_INITIATION] = 2 | DIRECTED,
[CEC_MSG_REQUEST_ARC_TERMINATION] = 2 | DIRECTED,
[CEC_MSG_TERMINATE_ARC] = 2 | DIRECTED,
[CEC_MSG_REQUEST_CURRENT_LATENCY] = 4 | BCAST,
[CEC_MSG_REPORT_CURRENT_LATENCY] = 6 | BCAST,
[CEC_MSG_CDC_MESSAGE] = 2 | BCAST,
};
/* Called by the CEC adapter if a message is received */
void cec_received_msg_ts(struct cec_adapter *adap,
struct cec_msg *msg, ktime_t ts)
{
struct cec_data *data;
u8 msg_init = cec_msg_initiator(msg);
u8 msg_dest = cec_msg_destination(msg);
u8 cmd = msg->msg[1];
bool is_reply = false;
bool valid_la = true;
bool monitor_valid_la = true;
u8 min_len = 0;
if (WARN_ON(!msg->len || msg->len > CEC_MAX_MSG_SIZE))
return;
if (adap->devnode.unregistered)
return;
/*
* Some CEC adapters will receive the messages that they transmitted.
* This test filters out those messages by checking if we are the
* initiator, and just returning in that case.
*
* Note that this won't work if this is an Unregistered device.
*
* It is bad practice for hardware to receive its own transmitted
* messages; luckily most CEC adapters behave correctly in this
* respect.
*/
if (msg_init != CEC_LOG_ADDR_UNREGISTERED &&
cec_has_log_addr(adap, msg_init))
return;
msg->rx_ts = ktime_to_ns(ts);
msg->rx_status = CEC_RX_STATUS_OK;
msg->sequence = msg->reply = msg->timeout = 0;
msg->tx_status = 0;
msg->tx_ts = 0;
msg->tx_arb_lost_cnt = 0;
msg->tx_nack_cnt = 0;
msg->tx_low_drive_cnt = 0;
msg->tx_error_cnt = 0;
msg->flags = 0;
memset(msg->msg + msg->len, 0, sizeof(msg->msg) - msg->len);
mutex_lock(&adap->lock);
dprintk(2, "%s: %*ph\n", __func__, msg->len, msg->msg);
if (!adap->transmit_in_progress)
adap->last_initiator = 0xff;
/* Check if this message was for us (directed or broadcast). */
if (!cec_msg_is_broadcast(msg)) {
valid_la = cec_has_log_addr(adap, msg_dest);
monitor_valid_la = valid_la;
}
/*
* Check if the length is not too short or if the message is a
* broadcast message where a directed message was expected or
* vice versa. If so, then the message has to be ignored (according
* to section CEC 7.3 and CEC 12.2).
*/
if (valid_la && msg->len > 1 && cec_msg_size[cmd]) {
u8 dir_fl = cec_msg_size[cmd] & BOTH;
min_len = cec_msg_size[cmd] & 0x1f;
if (msg->len < min_len)
valid_la = false;
else if (!cec_msg_is_broadcast(msg) && !(dir_fl & DIRECTED))
valid_la = false;
else if (cec_msg_is_broadcast(msg) && !(dir_fl & BCAST))
valid_la = false;
else if (cec_msg_is_broadcast(msg) &&
adap->log_addrs.cec_version < CEC_OP_CEC_VERSION_2_0 &&
!(dir_fl & BCAST1_4))
valid_la = false;
}
if (valid_la && min_len) {
/* These messages have special length requirements */
switch (cmd) {
case CEC_MSG_TIMER_STATUS:
if (msg->msg[2] & 0x10) {
switch (msg->msg[2] & 0xf) {
case CEC_OP_PROG_INFO_NOT_ENOUGH_SPACE:
case CEC_OP_PROG_INFO_MIGHT_NOT_BE_ENOUGH_SPACE:
if (msg->len < 5)
valid_la = false;
break;
}
} else if ((msg->msg[2] & 0xf) == CEC_OP_PROG_ERROR_DUPLICATE) {
if (msg->len < 5)
valid_la = false;
}
break;
case CEC_MSG_RECORD_ON:
switch (msg->msg[2]) {
case CEC_OP_RECORD_SRC_OWN:
break;
case CEC_OP_RECORD_SRC_DIGITAL:
if (msg->len < 10)
valid_la = false;
break;
case CEC_OP_RECORD_SRC_ANALOG:
if (msg->len < 7)
valid_la = false;
break;
case CEC_OP_RECORD_SRC_EXT_PLUG:
if (msg->len < 4)
valid_la = false;
break;
case CEC_OP_RECORD_SRC_EXT_PHYS_ADDR:
if (msg->len < 5)
valid_la = false;
break;
}
break;
}
}
/* It's a valid message and not a poll or CDC message */
if (valid_la && msg->len > 1 && cmd != CEC_MSG_CDC_MESSAGE) {
bool abort = cmd == CEC_MSG_FEATURE_ABORT;
/* The aborted command is in msg[2] */
if (abort)
cmd = msg->msg[2];
/*
* Walk over all transmitted messages that are waiting for a
* reply.
*/
list_for_each_entry(data, &adap->wait_queue, list) {
struct cec_msg *dst = &data->msg;
/*
* The *only* CEC message that has two possible replies
* is CEC_MSG_INITIATE_ARC.
* In this case allow either of the two replies.
*/
if (!abort && dst->msg[1] == CEC_MSG_INITIATE_ARC &&
(cmd == CEC_MSG_REPORT_ARC_INITIATED ||
cmd == CEC_MSG_REPORT_ARC_TERMINATED) &&
(dst->reply == CEC_MSG_REPORT_ARC_INITIATED ||
dst->reply == CEC_MSG_REPORT_ARC_TERMINATED))
dst->reply = cmd;
/* Does the command match? */
if ((abort && cmd != dst->msg[1]) ||
(!abort && cmd != dst->reply))
continue;
/* Does the addressing match? */
if (msg_init != cec_msg_destination(dst) &&
!cec_msg_is_broadcast(dst))
continue;
/* We got a reply */
memcpy(dst->msg, msg->msg, msg->len);
dst->len = msg->len;
dst->rx_ts = msg->rx_ts;
dst->rx_status = msg->rx_status;
if (abort)
dst->rx_status |= CEC_RX_STATUS_FEATURE_ABORT;
msg->flags = dst->flags;
msg->sequence = dst->sequence;
/* Remove it from the wait_queue */
list_del_init(&data->list);
/* Cancel the pending timeout work */
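/*
* If the work is already running it will take adap->lock itself,
* so drop the lock while waiting for it to finish to avoid a
* deadlock.
*/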
if (!cancel_delayed_work(&data->work)) {
mutex_unlock(&adap->lock);
cancel_delayed_work_sync(&data->work);
mutex_lock(&adap->lock);
}
/*
* Mark this as a reply, provided someone is still
* waiting for the answer.
*/
if (data->fh)
is_reply = true;
cec_data_completed(data);
break;
}
}
mutex_unlock(&adap->lock);
/* Pass the message on to any monitoring filehandles */
cec_queue_msg_monitor(adap, msg, monitor_valid_la);
/* We're done if it is not for us or a poll message */
if (!valid_la || msg->len <= 1)
return;
if (adap->log_addrs.log_addr_mask == 0)
return;
/*
* Process the message on the protocol level. If is_reply is true,
* then cec_receive_notify() won't pass on the reply to the listener(s)
* since that was already done by cec_data_completed() above.
*/
cec_receive_notify(adap, msg, is_reply);
}
EXPORT_SYMBOL_GPL(cec_received_msg_ts);
/* Logical Address Handling */
/*
* Attempt to claim a specific logical address.
*
* This function is called with adap->lock held.
*/
static int cec_config_log_addr(struct cec_adapter *adap,
unsigned int idx,
unsigned int log_addr)
{
struct cec_log_addrs *las = &adap->log_addrs;
struct cec_msg msg = { };
const unsigned int max_retries = 2;
unsigned int i;
int err;
if (cec_has_log_addr(adap, log_addr))
return 0;
/* Send poll message */
msg.len = 1;
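/* A poll message uses the LA being claimed as both initiator and destination. */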
msg.msg[0] = (log_addr << 4) | log_addr;
for (i = 0; i < max_retries; i++) {
err = cec_transmit_msg_fh(adap, &msg, NULL, true);
/*
* While trying to poll the physical address was reset
* and the adapter was unconfigured, so bail out.
*/
if (adap->phys_addr == CEC_PHYS_ADDR_INVALID)
return -EINTR;
/* Also bail out if the PA changed while configuring. */
if (adap->must_reconfigure)
return -EINTR;
if (err)
return err;
/*
* The message was aborted or timed out due to a disconnect or
* unconfigure, just bail out.
*/
if (msg.tx_status &
(CEC_TX_STATUS_ABORTED | CEC_TX_STATUS_TIMEOUT))
return -EINTR;
if (msg.tx_status & CEC_TX_STATUS_OK)
return 0;
if (msg.tx_status & CEC_TX_STATUS_NACK)
break;
/*
* Retry up to max_retries times if the message was neither
* OKed or NACKed. This can happen due to e.g. a Lost
* Arbitration condition.
*/
}
/*
* If we are unable to get an OK or a NACK after max_retries attempts
* (and note that each attempt already consists of four polls), then
* we assume that something is really weird and that it is not a
* good idea to try and claim this logical address.
*/
if (i == max_retries) {
dprintk(0, "polling for LA %u failed with tx_status=0x%04x\n",
log_addr, msg.tx_status);
return 0;
}
/*
* Message not acknowledged, so this logical
* address is free to use.
*/
err = call_op(adap, adap_log_addr, log_addr);
if (err)
return err;
las->log_addr[idx] = log_addr;
las->log_addr_mask |= 1 << log_addr;
return 1;
}
/*
* Unconfigure the adapter: clear all logical addresses and send
* the state changed event.
*
* This function is called with adap->lock held.
*/
static void cec_adap_unconfigure(struct cec_adapter *adap)
{
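/*
* Tell the hardware to release the claimed logical addresses,
* unless the adapter needs the HPD and the physical address is
* invalid (i.e. the HPD is gone).
*/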
if (!adap->needs_hpd || adap->phys_addr != CEC_PHYS_ADDR_INVALID)
WARN_ON(call_op(adap, adap_log_addr, CEC_LOG_ADDR_INVALID));
adap->log_addrs.log_addr_mask = 0;
adap->is_configured = false;
cec_flush(adap);
wake_up_interruptible(&adap->kthread_waitq);
cec_post_state_event(adap);
call_void_op(adap, adap_unconfigured);
}
/*
* Attempt to claim the required logical addresses.
*/
static int cec_config_thread_func(void *arg)
{
/* The various LAs for each type of device */
static const u8 tv_log_addrs[] = {
CEC_LOG_ADDR_TV, CEC_LOG_ADDR_SPECIFIC,
CEC_LOG_ADDR_INVALID
};
static const u8 record_log_addrs[] = {
CEC_LOG_ADDR_RECORD_1, CEC_LOG_ADDR_RECORD_2,
CEC_LOG_ADDR_RECORD_3,
CEC_LOG_ADDR_BACKUP_1, CEC_LOG_ADDR_BACKUP_2,
CEC_LOG_ADDR_INVALID
};
static const u8 tuner_log_addrs[] = {
CEC_LOG_ADDR_TUNER_1, CEC_LOG_ADDR_TUNER_2,
CEC_LOG_ADDR_TUNER_3, CEC_LOG_ADDR_TUNER_4,
CEC_LOG_ADDR_BACKUP_1, CEC_LOG_ADDR_BACKUP_2,
CEC_LOG_ADDR_INVALID
};
static const u8 playback_log_addrs[] = {
CEC_LOG_ADDR_PLAYBACK_1, CEC_LOG_ADDR_PLAYBACK_2,
CEC_LOG_ADDR_PLAYBACK_3,
CEC_LOG_ADDR_BACKUP_1, CEC_LOG_ADDR_BACKUP_2,
CEC_LOG_ADDR_INVALID
};
static const u8 audiosystem_log_addrs[] = {
CEC_LOG_ADDR_AUDIOSYSTEM,
CEC_LOG_ADDR_INVALID
};
static const u8 specific_use_log_addrs[] = {
CEC_LOG_ADDR_SPECIFIC,
CEC_LOG_ADDR_BACKUP_1, CEC_LOG_ADDR_BACKUP_2,
CEC_LOG_ADDR_INVALID
};
static const u8 *type2addrs[6] = {
[CEC_LOG_ADDR_TYPE_TV] = tv_log_addrs,
[CEC_LOG_ADDR_TYPE_RECORD] = record_log_addrs,
[CEC_LOG_ADDR_TYPE_TUNER] = tuner_log_addrs,
[CEC_LOG_ADDR_TYPE_PLAYBACK] = playback_log_addrs,
[CEC_LOG_ADDR_TYPE_AUDIOSYSTEM] = audiosystem_log_addrs,
[CEC_LOG_ADDR_TYPE_SPECIFIC] = specific_use_log_addrs,
};
static const u16 type2mask[] = {
[CEC_LOG_ADDR_TYPE_TV] = CEC_LOG_ADDR_MASK_TV,
[CEC_LOG_ADDR_TYPE_RECORD] = CEC_LOG_ADDR_MASK_RECORD,
[CEC_LOG_ADDR_TYPE_TUNER] = CEC_LOG_ADDR_MASK_TUNER,
[CEC_LOG_ADDR_TYPE_PLAYBACK] = CEC_LOG_ADDR_MASK_PLAYBACK,
[CEC_LOG_ADDR_TYPE_AUDIOSYSTEM] = CEC_LOG_ADDR_MASK_AUDIOSYSTEM,
[CEC_LOG_ADDR_TYPE_SPECIFIC] = CEC_LOG_ADDR_MASK_SPECIFIC,
};
struct cec_adapter *adap = arg;
struct cec_log_addrs *las = &adap->log_addrs;
int err;
int i, j;
mutex_lock(&adap->lock);
dprintk(1, "physical address: %x.%x.%x.%x, claim %d logical addresses\n",
cec_phys_addr_exp(adap->phys_addr), las->num_log_addrs);
las->log_addr_mask = 0;
if (las->log_addr_type[0] == CEC_LOG_ADDR_TYPE_UNREGISTERED)
goto configured;
reconfigure:
for (i = 0; i < las->num_log_addrs; i++) {
unsigned int type = las->log_addr_type[i];
const u8 *la_list;
u8 last_la;
/*
* The TV functionality can only map to physical address 0.
* For any other address, try the Specific functionality
* instead as per the spec.
*/
if (adap->phys_addr && type == CEC_LOG_ADDR_TYPE_TV)
type = CEC_LOG_ADDR_TYPE_SPECIFIC;
la_list = type2addrs[type];
last_la = las->log_addr[i];
las->log_addr[i] = CEC_LOG_ADDR_INVALID;
if (last_la == CEC_LOG_ADDR_INVALID ||
last_la == CEC_LOG_ADDR_UNREGISTERED ||
!((1 << last_la) & type2mask[type]))
last_la = la_list[0];
err = cec_config_log_addr(adap, i, last_la);
if (adap->must_reconfigure) {
adap->must_reconfigure = false;
las->log_addr_mask = 0;
goto reconfigure;
}
if (err > 0) /* Reused last LA */
continue;
if (err < 0)
goto unconfigure;
for (j = 0; la_list[j] != CEC_LOG_ADDR_INVALID; j++) {
/* Tried this one already, skip it */
if (la_list[j] == last_la)
continue;
/* The backup addresses are CEC 2.0 specific */
if ((la_list[j] == CEC_LOG_ADDR_BACKUP_1 ||
la_list[j] == CEC_LOG_ADDR_BACKUP_2) &&
las->cec_version < CEC_OP_CEC_VERSION_2_0)
continue;
err = cec_config_log_addr(adap, i, la_list[j]);
if (err == 0) /* LA is in use */
continue;
if (err < 0)
goto unconfigure;
/* Done, claimed an LA */
break;
}
if (la_list[j] == CEC_LOG_ADDR_INVALID)
dprintk(1, "could not claim LA %d\n", i);
}
if (adap->log_addrs.log_addr_mask == 0 &&
!(las->flags & CEC_LOG_ADDRS_FL_ALLOW_UNREG_FALLBACK))
goto unconfigure;
configured:
if (adap->log_addrs.log_addr_mask == 0) {
/* Fall back to unregistered */
las->log_addr[0] = CEC_LOG_ADDR_UNREGISTERED;
las->log_addr_mask = 1 << las->log_addr[0];
for (i = 1; i < las->num_log_addrs; i++)
las->log_addr[i] = CEC_LOG_ADDR_INVALID;
}
for (i = las->num_log_addrs; i < CEC_MAX_LOG_ADDRS; i++)
las->log_addr[i] = CEC_LOG_ADDR_INVALID;
adap->is_configured = true;
adap->is_configuring = false;
adap->must_reconfigure = false;
cec_post_state_event(adap);
/*
* Now post the Report Features and Report Physical Address broadcast
* messages. Note that these are non-blocking transmits, meaning that
* they are just queued up and once adap->lock is unlocked the main
* thread will kick in and start transmitting these.
*
* If after this function is done (but before one or more of these
* messages are actually transmitted) the CEC adapter is unconfigured,
* then any remaining messages will be dropped by the main thread.
*/
for (i = 0; i < las->num_log_addrs; i++) {
struct cec_msg msg = {};
if (las->log_addr[i] == CEC_LOG_ADDR_INVALID ||
(las->flags & CEC_LOG_ADDRS_FL_CDC_ONLY))
continue;
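/* All messages below are broadcast (destination 0xf) from the claimed LA. */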
msg.msg[0] = (las->log_addr[i] << 4) | 0x0f;
/* Report Features must come first according to CEC 2.0 */
if (las->log_addr[i] != CEC_LOG_ADDR_UNREGISTERED &&
adap->log_addrs.cec_version >= CEC_OP_CEC_VERSION_2_0) {
cec_fill_msg_report_features(adap, &msg, i);
cec_transmit_msg_fh(adap, &msg, NULL, false);
}
/* Report Physical Address */
cec_msg_report_physical_addr(&msg, adap->phys_addr,
las->primary_device_type[i]);
dprintk(1, "config: la %d pa %x.%x.%x.%x\n",
las->log_addr[i],
cec_phys_addr_exp(adap->phys_addr));
cec_transmit_msg_fh(adap, &msg, NULL, false);
/* Report Vendor ID */
if (adap->log_addrs.vendor_id != CEC_VENDOR_ID_NONE) {
cec_msg_device_vendor_id(&msg,
adap->log_addrs.vendor_id);
cec_transmit_msg_fh(adap, &msg, NULL, false);
}
}
adap->kthread_config = NULL;
complete(&adap->config_completion);
mutex_unlock(&adap->lock);
call_void_op(adap, configured);
return 0;
unconfigure:
for (i = 0; i < las->num_log_addrs; i++)
las->log_addr[i] = CEC_LOG_ADDR_INVALID;
cec_adap_unconfigure(adap);
adap->is_configuring = false;
adap->must_reconfigure = false;
adap->kthread_config = NULL;
complete(&adap->config_completion);
mutex_unlock(&adap->lock);
return 0;
}
/*
* Called from either __cec_s_phys_addr or __cec_s_log_addrs to claim the
* logical addresses.
*
* This function is called with adap->lock held.
*/
static void cec_claim_log_addrs(struct cec_adapter *adap, bool block)
{
if (WARN_ON(adap->is_configuring || adap->is_configured))
return;
init_completion(&adap->config_completion);
/* Ready to kick off the thread */
adap->is_configuring = true;
adap->kthread_config = kthread_run(cec_config_thread_func, adap,
"ceccfg-%s", adap->name);
if (IS_ERR(adap->kthread_config)) {
adap->kthread_config = NULL;
adap->is_configuring = false;
} else if (block) {
mutex_unlock(&adap->lock);
wait_for_completion(&adap->config_completion);
mutex_lock(&adap->lock);
}
}
/*
* Helper function to enable/disable the CEC adapter.
*
* This function is called with adap->lock held.
*/
int cec_adap_enable(struct cec_adapter *adap)
{
bool enable;
int ret = 0;
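/*
* The adapter must be enabled if any file handles are in Monitor
* All or Monitor Pin mode, or if logical addresses have been
* requested. If the adapter needs the HPD, then a valid physical
* address is required as well.
*/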
enable = adap->monitor_all_cnt || adap->monitor_pin_cnt ||
adap->log_addrs.num_log_addrs;
if (adap->needs_hpd)
enable = enable && adap->phys_addr != CEC_PHYS_ADDR_INVALID;
if (adap->devnode.unregistered)
enable = false;
if (enable == adap->is_enabled)
return 0;
/* serialize adap_enable */
mutex_lock(&adap->devnode.lock);
if (enable) {
adap->last_initiator = 0xff;
adap->transmit_in_progress = false;
ret = adap->ops->adap_enable(adap, true);
if (!ret) {
/*
* Enable monitor-all/pin modes if needed. We warn, but
* continue if this fails as this is not a critical error.
*/
if (adap->monitor_all_cnt)
WARN_ON(call_op(adap, adap_monitor_all_enable, true));
if (adap->monitor_pin_cnt)
WARN_ON(call_op(adap, adap_monitor_pin_enable, true));
}
} else {
/* Disable monitor-all/pin modes if needed (needs_hpd == 1) */
if (adap->monitor_all_cnt)
WARN_ON(call_op(adap, adap_monitor_all_enable, false));
if (adap->monitor_pin_cnt)
WARN_ON(call_op(adap, adap_monitor_pin_enable, false));
WARN_ON(adap->ops->adap_enable(adap, false));
adap->last_initiator = 0xff;
adap->transmit_in_progress = false;
adap->transmit_in_progress_aborted = false;
if (adap->transmitting)
cec_data_cancel(adap->transmitting, CEC_TX_STATUS_ABORTED, 0);
}
if (!ret)
adap->is_enabled = enable;
wake_up_interruptible(&adap->kthread_waitq);
mutex_unlock(&adap->devnode.lock);
return ret;
}
/* Set a new physical address and send an event notifying userspace of this.
*
* This function is called with adap->lock held.
*/
void __cec_s_phys_addr(struct cec_adapter *adap, u16 phys_addr, bool block)
{
bool becomes_invalid = phys_addr == CEC_PHYS_ADDR_INVALID;
bool is_invalid = adap->phys_addr == CEC_PHYS_ADDR_INVALID;
if (phys_addr == adap->phys_addr)
return;
if (!becomes_invalid && adap->devnode.unregistered)
return;
dprintk(1, "new physical address %x.%x.%x.%x\n",
cec_phys_addr_exp(phys_addr));
if (becomes_invalid || !is_invalid) {
adap->phys_addr = CEC_PHYS_ADDR_INVALID;
cec_post_state_event(adap);
cec_adap_unconfigure(adap);
if (becomes_invalid) {
cec_adap_enable(adap);
return;
}
}
adap->phys_addr = phys_addr;
if (is_invalid)
cec_adap_enable(adap);
cec_post_state_event(adap);
if (!adap->log_addrs.num_log_addrs)
return;
if (adap->is_configuring)
adap->must_reconfigure = true;
else
cec_claim_log_addrs(adap, block);
}
void cec_s_phys_addr(struct cec_adapter *adap, u16 phys_addr, bool block)
{
if (IS_ERR_OR_NULL(adap))
return;
mutex_lock(&adap->lock);
__cec_s_phys_addr(adap, phys_addr, block);
mutex_unlock(&adap->lock);
}
EXPORT_SYMBOL_GPL(cec_s_phys_addr);
void cec_s_phys_addr_from_edid(struct cec_adapter *adap,
const struct edid *edid)
{
u16 pa = CEC_PHYS_ADDR_INVALID;
if (edid && edid->extensions)
pa = cec_get_edid_phys_addr((const u8 *)edid,
EDID_LENGTH * (edid->extensions + 1), NULL);
cec_s_phys_addr(adap, pa, false);
}
EXPORT_SYMBOL_GPL(cec_s_phys_addr_from_edid);
void cec_s_conn_info(struct cec_adapter *adap,
const struct cec_connector_info *conn_info)
{
if (IS_ERR_OR_NULL(adap))
return;
if (!(adap->capabilities & CEC_CAP_CONNECTOR_INFO))
return;
mutex_lock(&adap->lock);
if (conn_info)
adap->conn_info = *conn_info;
else
memset(&adap->conn_info, 0, sizeof(adap->conn_info));
cec_post_state_event(adap);
mutex_unlock(&adap->lock);
}
EXPORT_SYMBOL_GPL(cec_s_conn_info);
/*
* Called from either the ioctl or a driver to set the logical addresses.
*
* This function is called with adap->lock held.
*/
int __cec_s_log_addrs(struct cec_adapter *adap,
struct cec_log_addrs *log_addrs, bool block)
{
u16 type_mask = 0;
int err;
int i;
if (adap->devnode.unregistered)
return -ENODEV;
if (!log_addrs || log_addrs->num_log_addrs == 0) {
if (!adap->log_addrs.num_log_addrs)
return 0;
if (adap->is_configuring || adap->is_configured)
cec_adap_unconfigure(adap);
adap->log_addrs.num_log_addrs = 0;
for (i = 0; i < CEC_MAX_LOG_ADDRS; i++)
adap->log_addrs.log_addr[i] = CEC_LOG_ADDR_INVALID;
adap->log_addrs.osd_name[0] = '\0';
adap->log_addrs.vendor_id = CEC_VENDOR_ID_NONE;
adap->log_addrs.cec_version = CEC_OP_CEC_VERSION_2_0;
cec_adap_enable(adap);
return 0;
}
if (log_addrs->flags & CEC_LOG_ADDRS_FL_CDC_ONLY) {
/*
* Sanitize log_addrs fields if a CDC-Only device is
* requested.
*/
log_addrs->num_log_addrs = 1;
log_addrs->osd_name[0] = '\0';
log_addrs->vendor_id = CEC_VENDOR_ID_NONE;
log_addrs->log_addr_type[0] = CEC_LOG_ADDR_TYPE_UNREGISTERED;
/*
* This is just an internal convention since a CDC-Only device
* doesn't have to be a switch. But switches already use
* unregistered, so it makes some kind of sense to pick this
* as the primary device. Since a CDC-Only device never sends
* any 'normal' CEC messages this primary device type is never
* sent over the CEC bus.
*/
log_addrs->primary_device_type[0] = CEC_OP_PRIM_DEVTYPE_SWITCH;
log_addrs->all_device_types[0] = 0;
log_addrs->features[0][0] = 0;
log_addrs->features[0][1] = 0;
}
/* Ensure the osd name is 0-terminated */
log_addrs->osd_name[sizeof(log_addrs->osd_name) - 1] = '\0';
/* Sanity checks */
if (log_addrs->num_log_addrs > adap->available_log_addrs) {
dprintk(1, "num_log_addrs > %d\n", adap->available_log_addrs);
return -EINVAL;
}
/*
* Vendor ID is a 24 bit number, so check if the value is
* within the correct range.
*/
if (log_addrs->vendor_id != CEC_VENDOR_ID_NONE &&
(log_addrs->vendor_id & 0xff000000) != 0) {
dprintk(1, "invalid vendor ID\n");
return -EINVAL;
}
if (log_addrs->cec_version != CEC_OP_CEC_VERSION_1_4 &&
log_addrs->cec_version != CEC_OP_CEC_VERSION_2_0) {
dprintk(1, "invalid CEC version\n");
return -EINVAL;
}
if (log_addrs->num_log_addrs > 1)
for (i = 0; i < log_addrs->num_log_addrs; i++)
if (log_addrs->log_addr_type[i] ==
CEC_LOG_ADDR_TYPE_UNREGISTERED) {
dprintk(1, "num_log_addrs > 1 can't be combined with unregistered LA\n");
return -EINVAL;
}
for (i = 0; i < log_addrs->num_log_addrs; i++) {
const u8 feature_sz = ARRAY_SIZE(log_addrs->features[0]);
u8 *features = log_addrs->features[i];
bool op_is_dev_features = false;
unsigned int j;
log_addrs->log_addr[i] = CEC_LOG_ADDR_INVALID;
if (log_addrs->log_addr_type[i] > CEC_LOG_ADDR_TYPE_UNREGISTERED) {
dprintk(1, "unknown logical address type\n");
return -EINVAL;
}
if (type_mask & (1 << log_addrs->log_addr_type[i])) {
dprintk(1, "duplicate logical address type\n");
return -EINVAL;
}
type_mask |= 1 << log_addrs->log_addr_type[i];
if ((type_mask & (1 << CEC_LOG_ADDR_TYPE_RECORD)) &&
(type_mask & (1 << CEC_LOG_ADDR_TYPE_PLAYBACK))) {
/* Record already contains the playback functionality */
dprintk(1, "invalid record + playback combination\n");
return -EINVAL;
}
if (log_addrs->primary_device_type[i] >
CEC_OP_PRIM_DEVTYPE_PROCESSOR) {
dprintk(1, "unknown primary device type\n");
return -EINVAL;
}
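/* Primary device type 2 is reserved in the CEC specification. */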
if (log_addrs->primary_device_type[i] == 2) {
dprintk(1, "invalid primary device type\n");
return -EINVAL;
}
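/*
* The features array holds the RC Profile bytes followed by the
* Device Features bytes. A cleared bit 7 (the extension bit) marks
* the last byte of each block, so find the end of the Device
* Features block.
*/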
for (j = 0; j < feature_sz; j++) {
if ((features[j] & 0x80) == 0) {
if (op_is_dev_features)
break;
op_is_dev_features = true;
}
}
if (!op_is_dev_features || j == feature_sz) {
dprintk(1, "malformed features\n");
return -EINVAL;
}
/* Zero unused part of the feature array */
memset(features + j + 1, 0, feature_sz - j - 1);
}
if (log_addrs->cec_version >= CEC_OP_CEC_VERSION_2_0) {
if (log_addrs->num_log_addrs > 2) {
dprintk(1, "CEC 2.0 allows no more than 2 logical addresses\n");
return -EINVAL;
}
if (log_addrs->num_log_addrs == 2) {
if (!(type_mask & ((1 << CEC_LOG_ADDR_TYPE_AUDIOSYSTEM) |
(1 << CEC_LOG_ADDR_TYPE_TV)))) {
dprintk(1, "two LAs is only allowed for audiosystem and TV\n");
return -EINVAL;
}
if (!(type_mask & ((1 << CEC_LOG_ADDR_TYPE_PLAYBACK) |
(1 << CEC_LOG_ADDR_TYPE_RECORD)))) {
dprintk(1, "an audiosystem/TV can only be combined with record or playback\n");
return -EINVAL;
}
}
}
/* Zero unused LAs */
for (i = log_addrs->num_log_addrs; i < CEC_MAX_LOG_ADDRS; i++) {
log_addrs->primary_device_type[i] = 0;
log_addrs->log_addr_type[i] = 0;
log_addrs->all_device_types[i] = 0;
memset(log_addrs->features[i], 0,
sizeof(log_addrs->features[i]));
}
log_addrs->log_addr_mask = adap->log_addrs.log_addr_mask;
adap->log_addrs = *log_addrs;
err = cec_adap_enable(adap);
if (!err && adap->phys_addr != CEC_PHYS_ADDR_INVALID)
cec_claim_log_addrs(adap, block);
return err;
}
int cec_s_log_addrs(struct cec_adapter *adap,
struct cec_log_addrs *log_addrs, bool block)
{
int err;
mutex_lock(&adap->lock);
err = __cec_s_log_addrs(adap, log_addrs, block);
mutex_unlock(&adap->lock);
return err;
}
EXPORT_SYMBOL_GPL(cec_s_log_addrs);
/* High-level core CEC message handling */
/* Fill in the Report Features message */
static void cec_fill_msg_report_features(struct cec_adapter *adap,
struct cec_msg *msg,
unsigned int la_idx)
{
const struct cec_log_addrs *las = &adap->log_addrs;
const u8 *features = las->features[la_idx];
bool op_is_dev_features = false;
unsigned int idx;
/* Report Features */
msg->msg[0] = (las->log_addr[la_idx] << 4) | 0x0f;
msg->len = 4;
msg->msg[1] = CEC_MSG_REPORT_FEATURES;
msg->msg[2] = adap->log_addrs.cec_version;
msg->msg[3] = las->all_device_types[la_idx];
/* Write RC Profiles first, then Device Features */
for (idx = 0; idx < ARRAY_SIZE(las->features[0]); idx++) {
msg->msg[msg->len++] = features[idx];
if ((features[idx] & CEC_OP_FEAT_EXT) == 0) {
if (op_is_dev_features)
break;
op_is_dev_features = true;
}
}
}
/* Transmit the Feature Abort message */
static int cec_feature_abort_reason(struct cec_adapter *adap,
struct cec_msg *msg, u8 reason)
{
struct cec_msg tx_msg = { };
/*
* Don't reply with CEC_MSG_FEATURE_ABORT to a CEC_MSG_FEATURE_ABORT
* message!
*/
if (msg->msg[1] == CEC_MSG_FEATURE_ABORT)
return 0;
/* Don't send a Feature Abort in reply to messages from 'Unregistered' */
if (cec_msg_initiator(msg) == CEC_LOG_ADDR_UNREGISTERED)
return 0;
cec_msg_set_reply_to(&tx_msg, msg);
cec_msg_feature_abort(&tx_msg, msg->msg[1], reason);
return cec_transmit_msg(adap, &tx_msg, false);
}
static int cec_feature_abort(struct cec_adapter *adap, struct cec_msg *msg)
{
return cec_feature_abort_reason(adap, msg,
CEC_OP_ABORT_UNRECOGNIZED_OP);
}
static int cec_feature_refused(struct cec_adapter *adap, struct cec_msg *msg)
{
return cec_feature_abort_reason(adap, msg,
CEC_OP_ABORT_REFUSED);
}
/*
* Called when a CEC message is received. This function will do any
* necessary core processing. The is_reply bool is true if this message
* is a reply to an earlier transmit.
*
* The message is either a broadcast message or a valid directed message.
*/
static int cec_receive_notify(struct cec_adapter *adap, struct cec_msg *msg,
bool is_reply)
{
bool is_broadcast = cec_msg_is_broadcast(msg);
u8 dest_laddr = cec_msg_destination(msg);
u8 init_laddr = cec_msg_initiator(msg);
u8 devtype = cec_log_addr2dev(adap, dest_laddr);
int la_idx = cec_log_addr2idx(adap, dest_laddr);
bool from_unregistered = init_laddr == 0xf;
struct cec_msg tx_cec_msg = { };
dprintk(2, "%s: %*ph\n", __func__, msg->len, msg->msg);
/* If this is a CDC-Only device, then ignore any non-CDC messages */
if (cec_is_cdc_only(&adap->log_addrs) &&
msg->msg[1] != CEC_MSG_CDC_MESSAGE)
return 0;
/* Allow drivers to process the message first */
if (adap->ops->received && !adap->devnode.unregistered &&
adap->ops->received(adap, msg) != -ENOMSG)
return 0;
/*
* REPORT_PHYSICAL_ADDR, CEC_MSG_USER_CONTROL_PRESSED and
* CEC_MSG_USER_CONTROL_RELEASED messages always have to be
* handled by the CEC core, even if the passthrough mode is on.
* The others are just ignored if passthrough mode is on.
*/
switch (msg->msg[1]) {
case CEC_MSG_GET_CEC_VERSION:
case CEC_MSG_ABORT:
case CEC_MSG_GIVE_DEVICE_POWER_STATUS:
case CEC_MSG_GIVE_OSD_NAME:
/*
* These messages reply with a directed message, so ignore if
* the initiator is Unregistered.
*/
if (!adap->passthrough && from_unregistered)
return 0;
fallthrough;
case CEC_MSG_GIVE_DEVICE_VENDOR_ID:
case CEC_MSG_GIVE_FEATURES:
case CEC_MSG_GIVE_PHYSICAL_ADDR:
/*
* Skip processing these messages if the passthrough mode
* is on.
*/
if (adap->passthrough)
goto skip_processing;
/* Ignore if addressing is wrong */
if (is_broadcast)
return 0;
break;
case CEC_MSG_USER_CONTROL_PRESSED:
case CEC_MSG_USER_CONTROL_RELEASED:
/* Wrong addressing mode: don't process */
if (is_broadcast || from_unregistered)
goto skip_processing;
break;
case CEC_MSG_REPORT_PHYSICAL_ADDR:
/*
* This message is always processed, regardless of the
* passthrough setting.
*
* Exception: don't process if wrong addressing mode.
*/
if (!is_broadcast)
goto skip_processing;
break;
default:
break;
}
cec_msg_set_reply_to(&tx_cec_msg, msg);
switch (msg->msg[1]) {
/* The following messages are processed but still passed through */
case CEC_MSG_REPORT_PHYSICAL_ADDR: {
u16 pa = (msg->msg[2] << 8) | msg->msg[3];
dprintk(1, "reported physical address %x.%x.%x.%x for logical address %d\n",
cec_phys_addr_exp(pa), init_laddr);
break;
}
case CEC_MSG_USER_CONTROL_PRESSED:
if (!(adap->capabilities & CEC_CAP_RC) ||
!(adap->log_addrs.flags & CEC_LOG_ADDRS_FL_ALLOW_RC_PASSTHRU))
break;
#ifdef CONFIG_MEDIA_CEC_RC
switch (msg->msg[2]) {
/*
* Play function, this message can have variable length
* depending on the specific play function that is used.
*/
case CEC_OP_UI_CMD_PLAY_FUNCTION:
if (msg->len == 2)
rc_keydown(adap->rc, RC_PROTO_CEC,
msg->msg[2], 0);
else
rc_keydown(adap->rc, RC_PROTO_CEC,
msg->msg[2] << 8 | msg->msg[3], 0);
break;
/*
* Other function messages that are not handled.
* Currently the RC framework does not allow supplying an
* additional parameter with a keypress. These "keys" contain
* other information such as channel number, an input number
* etc.
* For the time being these messages are not processed by the
* framework and are simply forwarded to user space.
*/
case CEC_OP_UI_CMD_SELECT_BROADCAST_TYPE:
case CEC_OP_UI_CMD_SELECT_SOUND_PRESENTATION:
case CEC_OP_UI_CMD_TUNE_FUNCTION:
case CEC_OP_UI_CMD_SELECT_MEDIA_FUNCTION:
case CEC_OP_UI_CMD_SELECT_AV_INPUT_FUNCTION:
case CEC_OP_UI_CMD_SELECT_AUDIO_INPUT_FUNCTION:
break;
default:
rc_keydown(adap->rc, RC_PROTO_CEC, msg->msg[2], 0);
break;
}
#endif
break;
case CEC_MSG_USER_CONTROL_RELEASED:
if (!(adap->capabilities & CEC_CAP_RC) ||
!(adap->log_addrs.flags & CEC_LOG_ADDRS_FL_ALLOW_RC_PASSTHRU))
break;
#ifdef CONFIG_MEDIA_CEC_RC
rc_keyup(adap->rc);
#endif
break;
/*
* The remaining messages are only processed if the passthrough mode
* is off.
*/
case CEC_MSG_GET_CEC_VERSION:
cec_msg_cec_version(&tx_cec_msg, adap->log_addrs.cec_version);
return cec_transmit_msg(adap, &tx_cec_msg, false);
case CEC_MSG_GIVE_PHYSICAL_ADDR:
/* Do nothing for CEC switches using addr 15 */
if (devtype == CEC_OP_PRIM_DEVTYPE_SWITCH && dest_laddr == 15)
return 0;
cec_msg_report_physical_addr(&tx_cec_msg, adap->phys_addr, devtype);
return cec_transmit_msg(adap, &tx_cec_msg, false);
case CEC_MSG_GIVE_DEVICE_VENDOR_ID:
if (adap->log_addrs.vendor_id == CEC_VENDOR_ID_NONE)
return cec_feature_abort(adap, msg);
cec_msg_device_vendor_id(&tx_cec_msg, adap->log_addrs.vendor_id);
return cec_transmit_msg(adap, &tx_cec_msg, false);
case CEC_MSG_ABORT:
/* Do nothing for CEC switches */
if (devtype == CEC_OP_PRIM_DEVTYPE_SWITCH)
return 0;
return cec_feature_refused(adap, msg);
case CEC_MSG_GIVE_OSD_NAME: {
if (adap->log_addrs.osd_name[0] == 0)
return cec_feature_abort(adap, msg);
cec_msg_set_osd_name(&tx_cec_msg, adap->log_addrs.osd_name);
return cec_transmit_msg(adap, &tx_cec_msg, false);
}
case CEC_MSG_GIVE_FEATURES:
if (adap->log_addrs.cec_version < CEC_OP_CEC_VERSION_2_0)
return cec_feature_abort(adap, msg);
cec_fill_msg_report_features(adap, &tx_cec_msg, la_idx);
return cec_transmit_msg(adap, &tx_cec_msg, false);
default:
/*
* Unprocessed messages are aborted if userspace isn't doing
* any processing either.
*/
if (!is_broadcast && !is_reply && !adap->follower_cnt &&
!adap->cec_follower && msg->msg[1] != CEC_MSG_FEATURE_ABORT)
return cec_feature_abort(adap, msg);
break;
}
skip_processing:
/* If this was a reply, then we're done, unless otherwise specified */
if (is_reply && !(msg->flags & CEC_MSG_FL_REPLY_TO_FOLLOWERS))
return 0;
/*
* Send to the exclusive follower if there is one, otherwise send
* to all followers.
*/
if (adap->cec_follower)
cec_queue_msg_fh(adap->cec_follower, msg);
else
cec_queue_msg_followers(adap, msg);
return 0;
}
/*
* Helper functions to keep track of the 'monitor all' use count.
*
* These functions are called with adap->lock held.
*/
int cec_monitor_all_cnt_inc(struct cec_adapter *adap)
{
int ret;
if (adap->monitor_all_cnt++)
return 0;
ret = cec_adap_enable(adap);
if (ret)
adap->monitor_all_cnt--;
return ret;
}
void cec_monitor_all_cnt_dec(struct cec_adapter *adap)
{
if (WARN_ON(!adap->monitor_all_cnt))
return;
if (--adap->monitor_all_cnt)
return;
WARN_ON(call_op(adap, adap_monitor_all_enable, false));
cec_adap_enable(adap);
}
/*
* Helper functions to keep track of the 'monitor pin' use count.
*
* These functions are called with adap->lock held.
*/
int cec_monitor_pin_cnt_inc(struct cec_adapter *adap)
{
int ret;
if (adap->monitor_pin_cnt++)
return 0;
ret = cec_adap_enable(adap);
if (ret)
adap->monitor_pin_cnt--;
return ret;
}
void cec_monitor_pin_cnt_dec(struct cec_adapter *adap)
{
if (WARN_ON(!adap->monitor_pin_cnt))
return;
if (--adap->monitor_pin_cnt)
return;
WARN_ON(call_op(adap, adap_monitor_pin_enable, false));
cec_adap_enable(adap);
}
#ifdef CONFIG_DEBUG_FS
/*
* Log the current state of the CEC adapter.
* Very useful for debugging.
*/
int cec_adap_status(struct seq_file *file, void *priv)
{
struct cec_adapter *adap = dev_get_drvdata(file->private);
struct cec_data *data;
mutex_lock(&adap->lock);
seq_printf(file, "enabled: %d\n", adap->is_enabled);
seq_printf(file, "configured: %d\n", adap->is_configured);
seq_printf(file, "configuring: %d\n", adap->is_configuring);
seq_printf(file, "phys_addr: %x.%x.%x.%x\n",
cec_phys_addr_exp(adap->phys_addr));
seq_printf(file, "number of LAs: %d\n", adap->log_addrs.num_log_addrs);
seq_printf(file, "LA mask: 0x%04x\n", adap->log_addrs.log_addr_mask);
if (adap->cec_follower)
seq_printf(file, "has CEC follower%s\n",
adap->passthrough ? " (in passthrough mode)" : "");
if (adap->cec_initiator)
seq_puts(file, "has CEC initiator\n");
if (adap->monitor_all_cnt)
seq_printf(file, "file handles in Monitor All mode: %u\n",
adap->monitor_all_cnt);
if (adap->monitor_pin_cnt)
seq_printf(file, "file handles in Monitor Pin mode: %u\n",
adap->monitor_pin_cnt);
if (adap->tx_timeouts) {
seq_printf(file, "transmit timeouts: %u\n",
adap->tx_timeouts);
adap->tx_timeouts = 0;
}
data = adap->transmitting;
if (data)
seq_printf(file, "transmitting message: %*ph (reply: %02x, timeout: %ums)\n",
data->msg.len, data->msg.msg, data->msg.reply,
data->msg.timeout);
seq_printf(file, "pending transmits: %u\n", adap->transmit_queue_sz);
list_for_each_entry(data, &adap->transmit_queue, list) {
seq_printf(file, "queued tx message: %*ph (reply: %02x, timeout: %ums)\n",
data->msg.len, data->msg.msg, data->msg.reply,
data->msg.timeout);
}
list_for_each_entry(data, &adap->wait_queue, list) {
seq_printf(file, "message waiting for reply: %*ph (reply: %02x, timeout: %ums)\n",
data->msg.len, data->msg.msg, data->msg.reply,
data->msg.timeout);
}
call_void_op(adap, adap_status, file);
mutex_unlock(&adap->lock);
return 0;
}
#endif
| linux-master | drivers/media/cec/core/cec-adap.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* cec-core.c - HDMI Consumer Electronics Control framework - Core
*
* Copyright 2016 Cisco Systems, Inc. and/or its affiliates. All rights reserved.
*/
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/kmod.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/types.h>
#include "cec-priv.h"
#define CEC_NUM_DEVICES 256
#define CEC_NAME "cec"
/*
* 400 ms is the time it takes for one 16 byte message to be
* transferred and 5 is the maximum number of retries. Add
* another 100 ms as a margin. So if the transmit doesn't
* finish before that time something is really wrong and we
* have to time out.
*
* This is a sign that something is really wrong and a warning
* will be issued.
*/
#define CEC_XFER_TIMEOUT_MS (5 * 400 + 100)
int cec_debug;
module_param_named(debug, cec_debug, int, 0644);
MODULE_PARM_DESC(debug, "debug level (0-2)");
static bool debug_phys_addr;
module_param(debug_phys_addr, bool, 0644);
MODULE_PARM_DESC(debug_phys_addr, "add CEC_CAP_PHYS_ADDR if set");
static dev_t cec_dev_t;
/* Active devices */
static DEFINE_MUTEX(cec_devnode_lock);
static DECLARE_BITMAP(cec_devnode_nums, CEC_NUM_DEVICES);
static struct dentry *top_cec_dir;
/* dev to cec_devnode */
#define to_cec_devnode(cd) container_of(cd, struct cec_devnode, dev)
int cec_get_device(struct cec_devnode *devnode)
{
/*
* Check if the cec device is available. This needs to be done with
* the devnode->lock held to prevent an open/unregister race:
* without the lock, the device could be unregistered and freed between
* the devnode->registered check and get_device() calls, leading to
* a crash.
*/
mutex_lock(&devnode->lock);
/*
* Return -ENXIO if the cec device has been removed
* already or if it is not registered anymore.
*/
if (!devnode->registered) {
mutex_unlock(&devnode->lock);
return -ENXIO;
}
/* and increase the device refcount */
get_device(&devnode->dev);
mutex_unlock(&devnode->lock);
return 0;
}
void cec_put_device(struct cec_devnode *devnode)
{
put_device(&devnode->dev);
}
/* Called when the last user of the cec device exits. */
static void cec_devnode_release(struct device *cd)
{
struct cec_devnode *devnode = to_cec_devnode(cd);
mutex_lock(&cec_devnode_lock);
/* Mark device node number as free */
clear_bit(devnode->minor, cec_devnode_nums);
mutex_unlock(&cec_devnode_lock);
cec_delete_adapter(to_cec_adapter(devnode));
}
static struct bus_type cec_bus_type = {
.name = CEC_NAME,
};
/*
* Register a cec device node
*
* The registration code assigns minor numbers and registers the new device node
* with the kernel. An error is returned if no free minor number can be found,
* or if the registration of the device node fails.
*
* Zero is returned on success.
*
* Note that if the cec_devnode_register call fails, the release() callback of
* the cec_devnode structure is *not* called, so the caller is responsible for
* freeing any data.
*/
static int __must_check cec_devnode_register(struct cec_devnode *devnode,
struct module *owner)
{
int minor;
int ret;
/* Part 1: Find a free minor number */
mutex_lock(&cec_devnode_lock);
minor = find_first_zero_bit(cec_devnode_nums, CEC_NUM_DEVICES);
if (minor == CEC_NUM_DEVICES) {
mutex_unlock(&cec_devnode_lock);
pr_err("could not get a free minor\n");
return -ENFILE;
}
set_bit(minor, cec_devnode_nums);
mutex_unlock(&cec_devnode_lock);
devnode->minor = minor;
devnode->dev.bus = &cec_bus_type;
devnode->dev.devt = MKDEV(MAJOR(cec_dev_t), minor);
devnode->dev.release = cec_devnode_release;
dev_set_name(&devnode->dev, "cec%d", devnode->minor);
device_initialize(&devnode->dev);
/* Part 2: Initialize and register the character device */
cdev_init(&devnode->cdev, &cec_devnode_fops);
devnode->cdev.owner = owner;
kobject_set_name(&devnode->cdev.kobj, "cec%d", devnode->minor);
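/* Mark the devnode as registered before cdev_device_add(): the device can be opened as soon as it has been added. */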
devnode->registered = true;
ret = cdev_device_add(&devnode->cdev, &devnode->dev);
if (ret) {
devnode->registered = false;
pr_err("%s: cdev_device_add failed\n", __func__);
goto clr_bit;
}
return 0;
clr_bit:
mutex_lock(&cec_devnode_lock);
clear_bit(devnode->minor, cec_devnode_nums);
mutex_unlock(&cec_devnode_lock);
return ret;
}
/*
* Unregister a cec device node
*
* This unregisters the passed device. Future open calls will be met with
* errors.
*
* This function can safely be called if the device node has never been
* registered or has already been unregistered.
*/
static void cec_devnode_unregister(struct cec_adapter *adap)
{
struct cec_devnode *devnode = &adap->devnode;
struct cec_fh *fh;
mutex_lock(&devnode->lock);
/* Check if devnode was never registered or already unregistered */
if (!devnode->registered || devnode->unregistered) {
mutex_unlock(&devnode->lock);
return;
}
devnode->registered = false;
devnode->unregistered = true;
mutex_lock(&devnode->lock_fhs);
list_for_each_entry(fh, &devnode->fhs, list)
wake_up_interruptible(&fh->wait);
mutex_unlock(&devnode->lock_fhs);
mutex_unlock(&devnode->lock);
mutex_lock(&adap->lock);
__cec_s_phys_addr(adap, CEC_PHYS_ADDR_INVALID, false);
__cec_s_log_addrs(adap, NULL, false);
// Disable the adapter (since adap->devnode.unregistered is true)
cec_adap_enable(adap);
mutex_unlock(&adap->lock);
cdev_device_del(&devnode->cdev, &devnode->dev);
put_device(&devnode->dev);
}
#ifdef CONFIG_DEBUG_FS
static ssize_t cec_error_inj_write(struct file *file,
const char __user *ubuf, size_t count, loff_t *ppos)
{
struct seq_file *sf = file->private_data;
struct cec_adapter *adap = sf->private;
char *buf;
char *line;
char *p;
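/* Copy at most one page from userspace to bound the allocation. */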
buf = memdup_user_nul(ubuf, min_t(size_t, PAGE_SIZE, count));
if (IS_ERR(buf))
return PTR_ERR(buf);
p = buf;
while (p && *p) {
p = skip_spaces(p);
line = strsep(&p, "\n");
if (!*line || *line == '#')
continue;
if (!call_op(adap, error_inj_parse_line, line)) {
kfree(buf);
return -EINVAL;
}
}
kfree(buf);
return count;
}
static int cec_error_inj_show(struct seq_file *sf, void *unused)
{
struct cec_adapter *adap = sf->private;
return call_op(adap, error_inj_show, sf);
}
static int cec_error_inj_open(struct inode *inode, struct file *file)
{
return single_open(file, cec_error_inj_show, inode->i_private);
}
static const struct file_operations cec_error_inj_fops = {
.open = cec_error_inj_open,
.write = cec_error_inj_write,
.read = seq_read,
.llseek = seq_lseek,
.release = single_release,
};
#endif
struct cec_adapter *cec_allocate_adapter(const struct cec_adap_ops *ops,
void *priv, const char *name, u32 caps,
u8 available_las)
{
struct cec_adapter *adap;
int res;
#ifndef CONFIG_MEDIA_CEC_RC
caps &= ~CEC_CAP_RC;
#endif
if (WARN_ON(!caps))
return ERR_PTR(-EINVAL);
if (WARN_ON(!ops))
return ERR_PTR(-EINVAL);
if (WARN_ON(!available_las || available_las > CEC_MAX_LOG_ADDRS))
return ERR_PTR(-EINVAL);
adap = kzalloc(sizeof(*adap), GFP_KERNEL);
if (!adap)
return ERR_PTR(-ENOMEM);
strscpy(adap->name, name, sizeof(adap->name));
adap->phys_addr = CEC_PHYS_ADDR_INVALID;
adap->cec_pin_is_high = true;
adap->log_addrs.cec_version = CEC_OP_CEC_VERSION_2_0;
adap->log_addrs.vendor_id = CEC_VENDOR_ID_NONE;
adap->capabilities = caps;
if (debug_phys_addr)
adap->capabilities |= CEC_CAP_PHYS_ADDR;
adap->needs_hpd = caps & CEC_CAP_NEEDS_HPD;
adap->available_log_addrs = available_las;
adap->sequence = 0;
adap->ops = ops;
adap->priv = priv;
mutex_init(&adap->lock);
INIT_LIST_HEAD(&adap->transmit_queue);
INIT_LIST_HEAD(&adap->wait_queue);
init_waitqueue_head(&adap->kthread_waitq);
/* adap->devnode initialization */
INIT_LIST_HEAD(&adap->devnode.fhs);
mutex_init(&adap->devnode.lock_fhs);
mutex_init(&adap->devnode.lock);
adap->kthread = kthread_run(cec_thread_func, adap, "cec-%s", name);
if (IS_ERR(adap->kthread)) {
pr_err("cec-%s: kernel_thread() failed\n", name);
res = PTR_ERR(adap->kthread);
kfree(adap);
return ERR_PTR(res);
}
#ifdef CONFIG_MEDIA_CEC_RC
if (!(caps & CEC_CAP_RC))
return adap;
/* Prepare the RC input device */
adap->rc = rc_allocate_device(RC_DRIVER_SCANCODE);
if (!adap->rc) {
pr_err("cec-%s: failed to allocate memory for rc_dev\n",
name);
kthread_stop(adap->kthread);
kfree(adap);
return ERR_PTR(-ENOMEM);
}
snprintf(adap->input_phys, sizeof(adap->input_phys),
"%s/input0", adap->name);
adap->rc->device_name = adap->name;
adap->rc->input_phys = adap->input_phys;
adap->rc->input_id.bustype = BUS_CEC;
adap->rc->input_id.vendor = 0;
adap->rc->input_id.product = 0;
adap->rc->input_id.version = 1;
adap->rc->driver_name = CEC_NAME;
adap->rc->allowed_protocols = RC_PROTO_BIT_CEC;
adap->rc->priv = adap;
adap->rc->map_name = RC_MAP_CEC;
adap->rc->timeout = MS_TO_US(550);
#endif
return adap;
}
EXPORT_SYMBOL_GPL(cec_allocate_adapter);
int cec_register_adapter(struct cec_adapter *adap,
struct device *parent)
{
int res;
if (IS_ERR_OR_NULL(adap))
return 0;
if (WARN_ON(!parent))
return -EINVAL;
adap->owner = parent->driver->owner;
adap->devnode.dev.parent = parent;
if (!adap->xfer_timeout_ms)
adap->xfer_timeout_ms = CEC_XFER_TIMEOUT_MS;
#ifdef CONFIG_MEDIA_CEC_RC
if (adap->capabilities & CEC_CAP_RC) {
adap->rc->dev.parent = parent;
res = rc_register_device(adap->rc);
if (res) {
pr_err("cec-%s: failed to prepare input device\n",
adap->name);
rc_free_device(adap->rc);
adap->rc = NULL;
return res;
}
}
#endif
res = cec_devnode_register(&adap->devnode, adap->owner);
if (res) {
#ifdef CONFIG_MEDIA_CEC_RC
/* Note: rc_unregister also calls rc_free */
rc_unregister_device(adap->rc);
adap->rc = NULL;
#endif
return res;
}
dev_set_drvdata(&adap->devnode.dev, adap);
#ifdef CONFIG_DEBUG_FS
if (!top_cec_dir)
return 0;
adap->cec_dir = debugfs_create_dir(dev_name(&adap->devnode.dev),
top_cec_dir);
debugfs_create_devm_seqfile(&adap->devnode.dev, "status", adap->cec_dir,
cec_adap_status);
if (!adap->ops->error_inj_show || !adap->ops->error_inj_parse_line)
return 0;
debugfs_create_file("error-inj", 0644, adap->cec_dir, adap,
&cec_error_inj_fops);
#endif
return 0;
}
EXPORT_SYMBOL_GPL(cec_register_adapter);
void cec_unregister_adapter(struct cec_adapter *adap)
{
if (IS_ERR_OR_NULL(adap))
return;
#ifdef CONFIG_MEDIA_CEC_RC
/* Note: rc_unregister also calls rc_free */
rc_unregister_device(adap->rc);
adap->rc = NULL;
#endif
debugfs_remove_recursive(adap->cec_dir);
#ifdef CONFIG_CEC_NOTIFIER
cec_notifier_cec_adap_unregister(adap->notifier, adap);
#endif
cec_devnode_unregister(adap);
}
EXPORT_SYMBOL_GPL(cec_unregister_adapter);
void cec_delete_adapter(struct cec_adapter *adap)
{
if (IS_ERR_OR_NULL(adap))
return;
if (adap->kthread_config)
kthread_stop(adap->kthread_config);
kthread_stop(adap->kthread);
if (adap->ops->adap_free)
adap->ops->adap_free(adap);
#ifdef CONFIG_MEDIA_CEC_RC
rc_free_device(adap->rc);
#endif
kfree(adap);
}
EXPORT_SYMBOL_GPL(cec_delete_adapter);
/*
* Initialise cec for linux
*/
static int __init cec_devnode_init(void)
{
int ret = alloc_chrdev_region(&cec_dev_t, 0, CEC_NUM_DEVICES, CEC_NAME);
if (ret < 0) {
pr_warn("cec: unable to allocate major\n");
return ret;
}
#ifdef CONFIG_DEBUG_FS
top_cec_dir = debugfs_create_dir("cec", NULL);
if (IS_ERR_OR_NULL(top_cec_dir)) {
pr_warn("cec: Failed to create debugfs cec dir\n");
top_cec_dir = NULL;
}
#endif
ret = bus_register(&cec_bus_type);
if (ret < 0) {
unregister_chrdev_region(cec_dev_t, CEC_NUM_DEVICES);
pr_warn("cec: bus_register failed\n");
return -EIO;
}
return 0;
}
static void __exit cec_devnode_exit(void)
{
debugfs_remove_recursive(top_cec_dir);
bus_unregister(&cec_bus_type);
unregister_chrdev_region(cec_dev_t, CEC_NUM_DEVICES);
}
subsys_initcall(cec_devnode_init);
module_exit(cec_devnode_exit)
MODULE_AUTHOR("Hans Verkuil <[email protected]>");
MODULE_DESCRIPTION("Device node registration for cec drivers");
MODULE_LICENSE("GPL");
| linux-master | drivers/media/cec/core/cec-core.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright 2017 Cisco Systems, Inc. and/or its affiliates. All rights reserved.
*/
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/sched/types.h>
#include <media/cec-pin.h>
#include "cec-pin-priv.h"
struct cec_error_inj_cmd {
unsigned int mode_offset;
int arg_idx;
const char *cmd;
};
static const struct cec_error_inj_cmd cec_error_inj_cmds[] = {
{ CEC_ERROR_INJ_RX_NACK_OFFSET, -1, "rx-nack" },
{ CEC_ERROR_INJ_RX_LOW_DRIVE_OFFSET,
CEC_ERROR_INJ_RX_LOW_DRIVE_ARG_IDX, "rx-low-drive" },
{ CEC_ERROR_INJ_RX_ADD_BYTE_OFFSET, -1, "rx-add-byte" },
{ CEC_ERROR_INJ_RX_REMOVE_BYTE_OFFSET, -1, "rx-remove-byte" },
{ CEC_ERROR_INJ_RX_ARB_LOST_OFFSET,
CEC_ERROR_INJ_RX_ARB_LOST_ARG_IDX, "rx-arb-lost" },
{ CEC_ERROR_INJ_TX_NO_EOM_OFFSET, -1, "tx-no-eom" },
{ CEC_ERROR_INJ_TX_EARLY_EOM_OFFSET, -1, "tx-early-eom" },
{ CEC_ERROR_INJ_TX_ADD_BYTES_OFFSET,
CEC_ERROR_INJ_TX_ADD_BYTES_ARG_IDX, "tx-add-bytes" },
{ CEC_ERROR_INJ_TX_REMOVE_BYTE_OFFSET, -1, "tx-remove-byte" },
{ CEC_ERROR_INJ_TX_SHORT_BIT_OFFSET,
CEC_ERROR_INJ_TX_SHORT_BIT_ARG_IDX, "tx-short-bit" },
{ CEC_ERROR_INJ_TX_LONG_BIT_OFFSET,
CEC_ERROR_INJ_TX_LONG_BIT_ARG_IDX, "tx-long-bit" },
{ CEC_ERROR_INJ_TX_CUSTOM_BIT_OFFSET,
CEC_ERROR_INJ_TX_CUSTOM_BIT_ARG_IDX, "tx-custom-bit" },
{ CEC_ERROR_INJ_TX_SHORT_START_OFFSET, -1, "tx-short-start" },
{ CEC_ERROR_INJ_TX_LONG_START_OFFSET, -1, "tx-long-start" },
{ CEC_ERROR_INJ_TX_CUSTOM_START_OFFSET, -1, "tx-custom-start" },
{ CEC_ERROR_INJ_TX_LAST_BIT_OFFSET,
CEC_ERROR_INJ_TX_LAST_BIT_ARG_IDX, "tx-last-bit" },
{ CEC_ERROR_INJ_TX_LOW_DRIVE_OFFSET,
CEC_ERROR_INJ_TX_LOW_DRIVE_ARG_IDX, "tx-low-drive" },
{ 0, -1, NULL }
};
u16 cec_pin_rx_error_inj(struct cec_pin *pin)
{
u16 cmd = CEC_ERROR_INJ_OP_ANY;
/* Only when 18 bits have been received do we have a valid cmd */
if (!(pin->error_inj[cmd] & CEC_ERROR_INJ_RX_MASK) &&
pin->rx_bit >= 18)
cmd = pin->rx_msg.msg[1];
return (pin->error_inj[cmd] & CEC_ERROR_INJ_RX_MASK) ? cmd :
CEC_ERROR_INJ_OP_ANY;
}
u16 cec_pin_tx_error_inj(struct cec_pin *pin)
{
u16 cmd = CEC_ERROR_INJ_OP_ANY;
if (!(pin->error_inj[cmd] & CEC_ERROR_INJ_TX_MASK) &&
pin->tx_msg.len > 1)
cmd = pin->tx_msg.msg[1];
return (pin->error_inj[cmd] & CEC_ERROR_INJ_TX_MASK) ? cmd :
CEC_ERROR_INJ_OP_ANY;
}
bool cec_pin_error_inj_parse_line(struct cec_adapter *adap, char *line)
{
static const char *delims = " \t\r";
struct cec_pin *pin = adap->pin;
unsigned int i;
bool has_pos = false;
char *p = line;
char *token;
char *comma;
u64 *error;
u8 *args;
bool has_op;
u8 op;
u8 mode;
u8 pos;
p = skip_spaces(p);
token = strsep(&p, delims);
if (!strcmp(token, "clear")) {
memset(pin->error_inj, 0, sizeof(pin->error_inj));
pin->rx_toggle = pin->tx_toggle = false;
pin->tx_ignore_nack_until_eom = false;
pin->tx_custom_pulse = false;
pin->tx_custom_low_usecs = CEC_TIM_CUSTOM_DEFAULT;
pin->tx_custom_high_usecs = CEC_TIM_CUSTOM_DEFAULT;
return true;
}
if (!strcmp(token, "rx-clear")) {
for (i = 0; i <= CEC_ERROR_INJ_OP_ANY; i++)
pin->error_inj[i] &= ~CEC_ERROR_INJ_RX_MASK;
pin->rx_toggle = false;
return true;
}
if (!strcmp(token, "tx-clear")) {
for (i = 0; i <= CEC_ERROR_INJ_OP_ANY; i++)
pin->error_inj[i] &= ~CEC_ERROR_INJ_TX_MASK;
pin->tx_toggle = false;
pin->tx_ignore_nack_until_eom = false;
pin->tx_custom_pulse = false;
pin->tx_custom_low_usecs = CEC_TIM_CUSTOM_DEFAULT;
pin->tx_custom_high_usecs = CEC_TIM_CUSTOM_DEFAULT;
return true;
}
if (!strcmp(token, "tx-ignore-nack-until-eom")) {
pin->tx_ignore_nack_until_eom = true;
return true;
}
if (!strcmp(token, "tx-custom-pulse")) {
pin->tx_custom_pulse = true;
cec_pin_start_timer(pin);
return true;
}
if (!p)
return false;
p = skip_spaces(p);
if (!strcmp(token, "tx-custom-low-usecs")) {
u32 usecs;
if (kstrtou32(p, 0, &usecs) || usecs > 10000000)
return false;
pin->tx_custom_low_usecs = usecs;
return true;
}
if (!strcmp(token, "tx-custom-high-usecs")) {
u32 usecs;
if (kstrtou32(p, 0, &usecs) || usecs > 10000000)
return false;
pin->tx_custom_high_usecs = usecs;
return true;
}
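/* Split an '<op>,<mode>' token into the opcode and mode parts. */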
comma = strchr(token, ',');
if (comma)
*comma++ = '\0';
if (!strcmp(token, "any")) {
has_op = false;
error = pin->error_inj + CEC_ERROR_INJ_OP_ANY;
args = pin->error_inj_args[CEC_ERROR_INJ_OP_ANY];
} else if (!kstrtou8(token, 0, &op)) {
has_op = true;
error = pin->error_inj + op;
args = pin->error_inj_args[op];
} else {
return false;
}
mode = CEC_ERROR_INJ_MODE_ONCE;
if (comma) {
if (!strcmp(comma, "off"))
mode = CEC_ERROR_INJ_MODE_OFF;
else if (!strcmp(comma, "once"))
mode = CEC_ERROR_INJ_MODE_ONCE;
else if (!strcmp(comma, "always"))
mode = CEC_ERROR_INJ_MODE_ALWAYS;
else if (!strcmp(comma, "toggle"))
mode = CEC_ERROR_INJ_MODE_TOGGLE;
else
return false;
}
token = strsep(&p, delims);
if (p) {
p = skip_spaces(p);
has_pos = !kstrtou8(p, 0, &pos);
}
if (!strcmp(token, "clear")) {
*error = 0;
return true;
}
if (!strcmp(token, "rx-clear")) {
*error &= ~CEC_ERROR_INJ_RX_MASK;
return true;
}
if (!strcmp(token, "tx-clear")) {
*error &= ~CEC_ERROR_INJ_TX_MASK;
return true;
}
for (i = 0; cec_error_inj_cmds[i].cmd; i++) {
const char *cmd = cec_error_inj_cmds[i].cmd;
unsigned int mode_offset;
u64 mode_mask;
int arg_idx;
bool is_bit_pos = true;
if (strcmp(token, cmd))
continue;
mode_offset = cec_error_inj_cmds[i].mode_offset;
mode_mask = CEC_ERROR_INJ_MODE_MASK << mode_offset;
arg_idx = cec_error_inj_cmds[i].arg_idx;
if (mode_offset == CEC_ERROR_INJ_RX_ARB_LOST_OFFSET) {
if (has_op)
return false;
if (!has_pos)
pos = 0x0f;
is_bit_pos = false;
} else if (mode_offset == CEC_ERROR_INJ_TX_ADD_BYTES_OFFSET) {
if (!has_pos || !pos)
return false;
is_bit_pos = false;
}
if (arg_idx >= 0 && is_bit_pos) {
if (!has_pos || pos >= 160)
return false;
if (has_op && pos < 10 + 8)
return false;
/* The injected bit position may not be the Ack bit */
if ((mode_offset == CEC_ERROR_INJ_TX_SHORT_BIT_OFFSET ||
mode_offset == CEC_ERROR_INJ_TX_LONG_BIT_OFFSET ||
mode_offset == CEC_ERROR_INJ_TX_CUSTOM_BIT_OFFSET) &&
(pos % 10) == 9)
return false;
}
*error &= ~mode_mask;
*error |= (u64)mode << mode_offset;
if (arg_idx >= 0)
args[arg_idx] = pos;
return true;
}
return false;
}
static void cec_pin_show_cmd(struct seq_file *sf, u32 cmd, u8 mode)
{
if (cmd == CEC_ERROR_INJ_OP_ANY)
seq_puts(sf, "any,");
else
seq_printf(sf, "0x%02x,", cmd);
switch (mode) {
case CEC_ERROR_INJ_MODE_ONCE:
seq_puts(sf, "once ");
break;
case CEC_ERROR_INJ_MODE_ALWAYS:
seq_puts(sf, "always ");
break;
case CEC_ERROR_INJ_MODE_TOGGLE:
seq_puts(sf, "toggle ");
break;
default:
seq_puts(sf, "off ");
break;
}
}
int cec_pin_error_inj_show(struct cec_adapter *adap, struct seq_file *sf)
{
struct cec_pin *pin = adap->pin;
unsigned int i, j;
seq_puts(sf, "# Clear error injections:\n");
seq_puts(sf, "# clear clear all rx and tx error injections\n");
seq_puts(sf, "# rx-clear clear all rx error injections\n");
seq_puts(sf, "# tx-clear clear all tx error injections\n");
seq_puts(sf, "# <op> clear clear all rx and tx error injections for <op>\n");
seq_puts(sf, "# <op> rx-clear clear all rx error injections for <op>\n");
seq_puts(sf, "# <op> tx-clear clear all tx error injections for <op>\n");
seq_puts(sf, "#\n");
seq_puts(sf, "# RX error injection:\n");
seq_puts(sf, "# <op>[,<mode>] rx-nack NACK the message instead of sending an ACK\n");
seq_puts(sf, "# <op>[,<mode>] rx-low-drive <bit> force a low-drive condition at this bit position\n");
seq_puts(sf, "# <op>[,<mode>] rx-add-byte add a spurious byte to the received CEC message\n");
seq_puts(sf, "# <op>[,<mode>] rx-remove-byte remove the last byte from the received CEC message\n");
seq_puts(sf, "# any[,<mode>] rx-arb-lost [<poll>] generate a POLL message to trigger an arbitration lost\n");
seq_puts(sf, "#\n");
seq_puts(sf, "# TX error injection settings:\n");
seq_puts(sf, "# tx-ignore-nack-until-eom ignore early NACKs until EOM\n");
seq_puts(sf, "# tx-custom-low-usecs <usecs> define the 'low' time for the custom pulse\n");
seq_puts(sf, "# tx-custom-high-usecs <usecs> define the 'high' time for the custom pulse\n");
seq_puts(sf, "# tx-custom-pulse transmit the custom pulse once the bus is idle\n");
seq_puts(sf, "#\n");
seq_puts(sf, "# TX error injection:\n");
seq_puts(sf, "# <op>[,<mode>] tx-no-eom don't set the EOM bit\n");
seq_puts(sf, "# <op>[,<mode>] tx-early-eom set the EOM bit one byte too soon\n");
seq_puts(sf, "# <op>[,<mode>] tx-add-bytes <num> append <num> (1-255) spurious bytes to the message\n");
seq_puts(sf, "# <op>[,<mode>] tx-remove-byte drop the last byte from the message\n");
seq_puts(sf, "# <op>[,<mode>] tx-short-bit <bit> make this bit shorter than allowed\n");
seq_puts(sf, "# <op>[,<mode>] tx-long-bit <bit> make this bit longer than allowed\n");
seq_puts(sf, "# <op>[,<mode>] tx-custom-bit <bit> send the custom pulse instead of this bit\n");
seq_puts(sf, "# <op>[,<mode>] tx-short-start send a start pulse that's too short\n");
seq_puts(sf, "# <op>[,<mode>] tx-long-start send a start pulse that's too long\n");
seq_puts(sf, "# <op>[,<mode>] tx-custom-start send the custom pulse instead of the start pulse\n");
seq_puts(sf, "# <op>[,<mode>] tx-last-bit <bit> stop sending after this bit\n");
seq_puts(sf, "# <op>[,<mode>] tx-low-drive <bit> force a low-drive condition at this bit position\n");
seq_puts(sf, "#\n");
seq_puts(sf, "# <op> CEC message opcode (0-255) or 'any'\n");
seq_puts(sf, "# <mode> 'once' (default), 'always', 'toggle' or 'off'\n");
seq_puts(sf, "# <bit> CEC message bit (0-159)\n");
seq_puts(sf, "# 10 bits per 'byte': bits 0-7: data, bit 8: EOM, bit 9: ACK\n");
seq_puts(sf, "# <poll> CEC poll message used to test arbitration lost (0x00-0xff, default 0x0f)\n");
seq_puts(sf, "# <usecs> microseconds (0-10000000, default 1000)\n");
seq_puts(sf, "\nclear\n");
for (i = 0; i < ARRAY_SIZE(pin->error_inj); i++) {
u64 e = pin->error_inj[i];
for (j = 0; cec_error_inj_cmds[j].cmd; j++) {
const char *cmd = cec_error_inj_cmds[j].cmd;
unsigned int mode;
unsigned int mode_offset;
int arg_idx;
mode_offset = cec_error_inj_cmds[j].mode_offset;
arg_idx = cec_error_inj_cmds[j].arg_idx;
mode = (e >> mode_offset) & CEC_ERROR_INJ_MODE_MASK;
if (!mode)
continue;
cec_pin_show_cmd(sf, i, mode);
seq_puts(sf, cmd);
if (arg_idx >= 0)
seq_printf(sf, " %u",
pin->error_inj_args[i][arg_idx]);
seq_puts(sf, "\n");
}
}
if (pin->tx_ignore_nack_until_eom)
seq_puts(sf, "tx-ignore-nack-until-eom\n");
if (pin->tx_custom_pulse)
seq_puts(sf, "tx-custom-pulse\n");
if (pin->tx_custom_low_usecs != CEC_TIM_CUSTOM_DEFAULT)
seq_printf(sf, "tx-custom-low-usecs %u\n",
pin->tx_custom_low_usecs);
if (pin->tx_custom_high_usecs != CEC_TIM_CUSTOM_DEFAULT)
seq_printf(sf, "tx-custom-high-usecs %u\n",
pin->tx_custom_high_usecs);
return 0;
}
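/*
 * Usage sketch (not part of the original file): the commands parsed and
 * listed above are exchanged through this adapter's debugfs 'error-inj'
 * file. Assuming debugfs is mounted in the usual place and the adapter
 * is cec0, a session could look like:
 *
 *   cat /sys/kernel/debug/cec/cec0/error-inj
 *   echo "0x82,always tx-short-bit 13" > /sys/kernel/debug/cec/cec0/error-inj
 *   echo "clear" > /sys/kernel/debug/cec/cec0/error-inj
 *
 * The mount point and adapter name are assumptions; the command syntax is
 * the one documented by cec_pin_error_inj_show() above.
 */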
| linux-master | drivers/media/cec/core/cec-pin-error-inj.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* cec-notifier.c - notify CEC drivers of physical address changes
*
* Copyright 2016 Russell King.
* Copyright 2016-2017 Cisco Systems, Inc. and/or its affiliates. All rights reserved.
*/
#include <linux/export.h>
#include <linux/platform_device.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/i2c.h>
#include <linux/list.h>
#include <linux/kref.h>
#include <linux/of_platform.h>
#include <media/cec.h>
#include <media/cec-notifier.h>
#include <drm/drm_edid.h>
struct cec_notifier {
struct mutex lock;
struct list_head head;
struct kref kref;
struct device *hdmi_dev;
struct cec_connector_info conn_info;
const char *port_name;
struct cec_adapter *cec_adap;
u16 phys_addr;
};
static LIST_HEAD(cec_notifiers);
static DEFINE_MUTEX(cec_notifiers_lock);
/**
* cec_notifier_get_conn - find or create a new cec_notifier for the given
* device and connector tuple.
* @hdmi_dev: device that sends the events.
* @port_name: the connector name from which the event occurs
*
* If a notifier for device @hdmi_dev already exists, then increase the refcount
* and return that notifier.
*
* If it doesn't exist, then allocate a new notifier struct and return a
* pointer to that new struct.
*
* Return NULL if the memory could not be allocated.
*/
static struct cec_notifier *
cec_notifier_get_conn(struct device *hdmi_dev, const char *port_name)
{
struct cec_notifier *n;
mutex_lock(&cec_notifiers_lock);
list_for_each_entry(n, &cec_notifiers, head) {
if (n->hdmi_dev == hdmi_dev &&
(!port_name ||
(n->port_name && !strcmp(n->port_name, port_name)))) {
kref_get(&n->kref);
mutex_unlock(&cec_notifiers_lock);
return n;
}
}
n = kzalloc(sizeof(*n), GFP_KERNEL);
if (!n)
goto unlock;
n->hdmi_dev = hdmi_dev;
if (port_name) {
n->port_name = kstrdup(port_name, GFP_KERNEL);
if (!n->port_name) {
kfree(n);
n = NULL;
goto unlock;
}
}
n->phys_addr = CEC_PHYS_ADDR_INVALID;
mutex_init(&n->lock);
kref_init(&n->kref);
list_add_tail(&n->head, &cec_notifiers);
unlock:
mutex_unlock(&cec_notifiers_lock);
return n;
}
static void cec_notifier_release(struct kref *kref)
{
struct cec_notifier *n =
container_of(kref, struct cec_notifier, kref);
list_del(&n->head);
kfree(n->port_name);
kfree(n);
}
static void cec_notifier_put(struct cec_notifier *n)
{
mutex_lock(&cec_notifiers_lock);
kref_put(&n->kref, cec_notifier_release);
mutex_unlock(&cec_notifiers_lock);
}
struct cec_notifier *
cec_notifier_conn_register(struct device *hdmi_dev, const char *port_name,
const struct cec_connector_info *conn_info)
{
struct cec_notifier *n = cec_notifier_get_conn(hdmi_dev, port_name);
if (!n)
return n;
mutex_lock(&n->lock);
n->phys_addr = CEC_PHYS_ADDR_INVALID;
if (conn_info)
n->conn_info = *conn_info;
else
memset(&n->conn_info, 0, sizeof(n->conn_info));
if (n->cec_adap) {
if (!n->cec_adap->adap_controls_phys_addr)
cec_phys_addr_invalidate(n->cec_adap);
cec_s_conn_info(n->cec_adap, conn_info);
}
mutex_unlock(&n->lock);
return n;
}
EXPORT_SYMBOL_GPL(cec_notifier_conn_register);
void cec_notifier_conn_unregister(struct cec_notifier *n)
{
if (!n)
return;
mutex_lock(&n->lock);
memset(&n->conn_info, 0, sizeof(n->conn_info));
n->phys_addr = CEC_PHYS_ADDR_INVALID;
if (n->cec_adap) {
if (!n->cec_adap->adap_controls_phys_addr)
cec_phys_addr_invalidate(n->cec_adap);
cec_s_conn_info(n->cec_adap, NULL);
}
mutex_unlock(&n->lock);
cec_notifier_put(n);
}
EXPORT_SYMBOL_GPL(cec_notifier_conn_unregister);
struct cec_notifier *
cec_notifier_cec_adap_register(struct device *hdmi_dev, const char *port_name,
struct cec_adapter *adap)
{
struct cec_notifier *n;
if (WARN_ON(!adap))
return NULL;
n = cec_notifier_get_conn(hdmi_dev, port_name);
if (!n)
return n;
mutex_lock(&n->lock);
n->cec_adap = adap;
adap->conn_info = n->conn_info;
adap->notifier = n;
if (!adap->adap_controls_phys_addr)
cec_s_phys_addr(adap, n->phys_addr, false);
mutex_unlock(&n->lock);
return n;
}
EXPORT_SYMBOL_GPL(cec_notifier_cec_adap_register);
void cec_notifier_cec_adap_unregister(struct cec_notifier *n,
struct cec_adapter *adap)
{
if (!n)
return;
mutex_lock(&n->lock);
adap->notifier = NULL;
n->cec_adap = NULL;
mutex_unlock(&n->lock);
cec_notifier_put(n);
}
EXPORT_SYMBOL_GPL(cec_notifier_cec_adap_unregister);
void cec_notifier_set_phys_addr(struct cec_notifier *n, u16 pa)
{
if (n == NULL)
return;
mutex_lock(&n->lock);
n->phys_addr = pa;
if (n->cec_adap && !n->cec_adap->adap_controls_phys_addr)
cec_s_phys_addr(n->cec_adap, n->phys_addr, false);
mutex_unlock(&n->lock);
}
EXPORT_SYMBOL_GPL(cec_notifier_set_phys_addr);
void cec_notifier_set_phys_addr_from_edid(struct cec_notifier *n,
const struct edid *edid)
{
u16 pa = CEC_PHYS_ADDR_INVALID;
if (n == NULL)
return;
if (edid && edid->extensions)
pa = cec_get_edid_phys_addr((const u8 *)edid,
EDID_LENGTH * (edid->extensions + 1), NULL);
cec_notifier_set_phys_addr(n, pa);
}
EXPORT_SYMBOL_GPL(cec_notifier_set_phys_addr_from_edid);
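/*
 * Usage sketch (illustrative, not taken from a real driver): an HDMI
 * transmitter driver typically pairs the helpers above roughly like this;
 * variable names are placeholders and error handling is omitted:
 *
 *   n = cec_notifier_conn_register(hdmi_dev, NULL, NULL);
 *
 *   // on hotplug, once an EDID has been read:
 *   cec_notifier_set_phys_addr_from_edid(n, edid);
 *
 *   // on unplug:
 *   cec_notifier_set_phys_addr(n, CEC_PHYS_ADDR_INVALID);
 *
 *   // on teardown:
 *   cec_notifier_conn_unregister(n);
 */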
struct device *cec_notifier_parse_hdmi_phandle(struct device *dev)
{
struct platform_device *hdmi_pdev;
struct device *hdmi_dev = NULL;
struct device_node *np;
np = of_parse_phandle(dev->of_node, "hdmi-phandle", 0);
if (!np) {
dev_err(dev, "Failed to find HDMI node in device tree\n");
return ERR_PTR(-ENODEV);
}
hdmi_pdev = of_find_device_by_node(np);
if (hdmi_pdev)
hdmi_dev = &hdmi_pdev->dev;
#if IS_REACHABLE(CONFIG_I2C)
if (!hdmi_dev) {
struct i2c_client *hdmi_client = of_find_i2c_device_by_node(np);
if (hdmi_client)
hdmi_dev = &hdmi_client->dev;
}
#endif
of_node_put(np);
if (!hdmi_dev)
return ERR_PTR(-EPROBE_DEFER);
/*
* Note that the device struct is only used as a key into the
* cec_notifiers list; it is never actually accessed.
* So we decrement the reference here so we don't leak
* memory.
*/
put_device(hdmi_dev);
return hdmi_dev;
}
EXPORT_SYMBOL_GPL(cec_notifier_parse_hdmi_phandle);
| linux-master | drivers/media/cec/core/cec-notifier.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* cec-api.c - HDMI Consumer Electronics Control framework - API
*
* Copyright 2016 Cisco Systems, Inc. and/or its affiliates. All rights reserved.
*/
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/kmod.h>
#include <linux/ktime.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/uaccess.h>
#include <linux/version.h>
#include <media/cec-pin.h>
#include "cec-priv.h"
#include "cec-pin-priv.h"
static inline struct cec_devnode *cec_devnode_data(struct file *filp)
{
struct cec_fh *fh = filp->private_data;
return &fh->adap->devnode;
}
/* CEC file operations */
static __poll_t cec_poll(struct file *filp,
struct poll_table_struct *poll)
{
struct cec_fh *fh = filp->private_data;
struct cec_adapter *adap = fh->adap;
__poll_t res = 0;
poll_wait(filp, &fh->wait, poll);
if (!cec_is_registered(adap))
return EPOLLERR | EPOLLHUP | EPOLLPRI;
mutex_lock(&adap->lock);
if (adap->is_configured &&
adap->transmit_queue_sz < CEC_MAX_MSG_TX_QUEUE_SZ)
res |= EPOLLOUT | EPOLLWRNORM;
if (fh->queued_msgs)
res |= EPOLLIN | EPOLLRDNORM;
if (fh->total_queued_events)
res |= EPOLLPRI;
mutex_unlock(&adap->lock);
return res;
}
static bool cec_is_busy(const struct cec_adapter *adap,
const struct cec_fh *fh)
{
bool valid_initiator = adap->cec_initiator && adap->cec_initiator == fh;
bool valid_follower = adap->cec_follower && adap->cec_follower == fh;
/*
* Exclusive initiators and followers can always access the CEC adapter
*/
if (valid_initiator || valid_follower)
return false;
/*
* All others can only access the CEC adapter if there is no
* exclusive initiator and they are in INITIATOR mode.
*/
return adap->cec_initiator ||
fh->mode_initiator == CEC_MODE_NO_INITIATOR;
}
static long cec_adap_g_caps(struct cec_adapter *adap,
struct cec_caps __user *parg)
{
struct cec_caps caps = {};
strscpy(caps.driver, adap->devnode.dev.parent->driver->name,
sizeof(caps.driver));
strscpy(caps.name, adap->name, sizeof(caps.name));
caps.available_log_addrs = adap->available_log_addrs;
caps.capabilities = adap->capabilities;
caps.version = LINUX_VERSION_CODE;
if (copy_to_user(parg, &caps, sizeof(caps)))
return -EFAULT;
return 0;
}
static long cec_adap_g_phys_addr(struct cec_adapter *adap,
__u16 __user *parg)
{
u16 phys_addr;
mutex_lock(&adap->lock);
phys_addr = adap->phys_addr;
mutex_unlock(&adap->lock);
if (copy_to_user(parg, &phys_addr, sizeof(phys_addr)))
return -EFAULT;
return 0;
}
static int cec_validate_phys_addr(u16 phys_addr)
{
int i;
if (phys_addr == CEC_PHYS_ADDR_INVALID)
return 0;
for (i = 0; i < 16; i += 4)
if (phys_addr & (0xf << i))
break;
if (i == 16)
return 0;
for (i += 4; i < 16; i += 4)
if ((phys_addr & (0xf << i)) == 0)
return -EINVAL;
return 0;
}
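/*
 * Worked example (illustrative): a physical address packs the four nibbles
 * a.b.c.d into 16 bits, and once a nibble is 0 every following (lower)
 * nibble must be 0 as well. cec_validate_phys_addr() therefore accepts
 * 0x0000 (0.0.0.0), 0x1000 (1.0.0.0) and 0x1200 (1.2.0.0), but rejects
 * 0x1020 (1.0.2.0) because a non-zero nibble follows a zero one.
 */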
static long cec_adap_s_phys_addr(struct cec_adapter *adap, struct cec_fh *fh,
bool block, __u16 __user *parg)
{
u16 phys_addr;
long err;
if (!(adap->capabilities & CEC_CAP_PHYS_ADDR))
return -ENOTTY;
if (copy_from_user(&phys_addr, parg, sizeof(phys_addr)))
return -EFAULT;
err = cec_validate_phys_addr(phys_addr);
if (err)
return err;
mutex_lock(&adap->lock);
if (cec_is_busy(adap, fh))
err = -EBUSY;
else
__cec_s_phys_addr(adap, phys_addr, block);
mutex_unlock(&adap->lock);
return err;
}
static long cec_adap_g_log_addrs(struct cec_adapter *adap,
struct cec_log_addrs __user *parg)
{
struct cec_log_addrs log_addrs;
mutex_lock(&adap->lock);
/*
* We use memcpy here instead of assignment since there is a
* hole at the end of struct cec_log_addrs that an assignment
* might ignore. So when we do copy_to_user() we could leak
* one byte of memory.
*/
memcpy(&log_addrs, &adap->log_addrs, sizeof(log_addrs));
if (!adap->is_configured)
memset(log_addrs.log_addr, CEC_LOG_ADDR_INVALID,
sizeof(log_addrs.log_addr));
mutex_unlock(&adap->lock);
if (copy_to_user(parg, &log_addrs, sizeof(log_addrs)))
return -EFAULT;
return 0;
}
static long cec_adap_s_log_addrs(struct cec_adapter *adap, struct cec_fh *fh,
bool block, struct cec_log_addrs __user *parg)
{
struct cec_log_addrs log_addrs;
long err = -EBUSY;
if (!(adap->capabilities & CEC_CAP_LOG_ADDRS))
return -ENOTTY;
if (copy_from_user(&log_addrs, parg, sizeof(log_addrs)))
return -EFAULT;
log_addrs.flags &= CEC_LOG_ADDRS_FL_ALLOW_UNREG_FALLBACK |
CEC_LOG_ADDRS_FL_ALLOW_RC_PASSTHRU |
CEC_LOG_ADDRS_FL_CDC_ONLY;
mutex_lock(&adap->lock);
if (!adap->is_configuring &&
(!log_addrs.num_log_addrs || !adap->is_configured) &&
!cec_is_busy(adap, fh)) {
err = __cec_s_log_addrs(adap, &log_addrs, block);
if (!err)
log_addrs = adap->log_addrs;
}
mutex_unlock(&adap->lock);
if (err)
return err;
if (copy_to_user(parg, &log_addrs, sizeof(log_addrs)))
return -EFAULT;
return 0;
}
static long cec_adap_g_connector_info(struct cec_adapter *adap,
struct cec_log_addrs __user *parg)
{
int ret = 0;
if (!(adap->capabilities & CEC_CAP_CONNECTOR_INFO))
return -ENOTTY;
mutex_lock(&adap->lock);
if (copy_to_user(parg, &adap->conn_info, sizeof(adap->conn_info)))
ret = -EFAULT;
mutex_unlock(&adap->lock);
return ret;
}
static long cec_transmit(struct cec_adapter *adap, struct cec_fh *fh,
bool block, struct cec_msg __user *parg)
{
struct cec_msg msg = {};
long err = 0;
if (!(adap->capabilities & CEC_CAP_TRANSMIT))
return -ENOTTY;
if (copy_from_user(&msg, parg, sizeof(msg)))
return -EFAULT;
mutex_lock(&adap->lock);
if (adap->log_addrs.num_log_addrs == 0)
err = -EPERM;
else if (adap->is_configuring)
err = -ENONET;
else if (cec_is_busy(adap, fh))
err = -EBUSY;
else
err = cec_transmit_msg_fh(adap, &msg, fh, block);
mutex_unlock(&adap->lock);
if (err)
return err;
if (copy_to_user(parg, &msg, sizeof(msg)))
return -EFAULT;
return 0;
}
/* Called by CEC_RECEIVE: wait for a message to arrive */
static int cec_receive_msg(struct cec_fh *fh, struct cec_msg *msg, bool block)
{
u32 timeout = msg->timeout;
int res;
do {
mutex_lock(&fh->lock);
/* Are there received messages queued up? */
if (fh->queued_msgs) {
/* Yes, return the first one */
struct cec_msg_entry *entry =
list_first_entry(&fh->msgs,
struct cec_msg_entry, list);
list_del(&entry->list);
*msg = entry->msg;
kfree(entry);
fh->queued_msgs--;
mutex_unlock(&fh->lock);
/* restore original timeout value */
msg->timeout = timeout;
return 0;
}
/* No, return EAGAIN in non-blocking mode or wait */
mutex_unlock(&fh->lock);
/* Return when in non-blocking mode */
if (!block)
return -EAGAIN;
if (msg->timeout) {
/* The user specified a timeout */
res = wait_event_interruptible_timeout(fh->wait,
fh->queued_msgs,
msecs_to_jiffies(msg->timeout));
if (res == 0)
res = -ETIMEDOUT;
else if (res > 0)
res = 0;
} else {
/* Wait indefinitely */
res = wait_event_interruptible(fh->wait,
fh->queued_msgs);
}
/* Exit on error, otherwise loop to get the new message */
} while (!res);
return res;
}
static long cec_receive(struct cec_adapter *adap, struct cec_fh *fh,
bool block, struct cec_msg __user *parg)
{
struct cec_msg msg = {};
long err;
if (copy_from_user(&msg, parg, sizeof(msg)))
return -EFAULT;
err = cec_receive_msg(fh, &msg, block);
if (err)
return err;
msg.flags = 0;
if (copy_to_user(parg, &msg, sizeof(msg)))
return -EFAULT;
return 0;
}
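/*
 * Userspace sketch (illustrative): the blocking and timeout behaviour
 * implemented above is what an application sees through the CEC_RECEIVE
 * ioctl. Assuming 'fd' is an open /dev/cecX file descriptor:
 *
 *   struct cec_msg msg = {};
 *
 *   msg.timeout = 1000;  // wait at most 1000 ms
 *   if (ioctl(fd, CEC_RECEIVE, &msg) == 0)
 *       ;  // msg.msg[0..msg.len - 1] holds the received message
 *   else if (errno == ETIMEDOUT)
 *       ;  // nothing was queued within the timeout
 */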
static long cec_dqevent(struct cec_adapter *adap, struct cec_fh *fh,
bool block, struct cec_event __user *parg)
{
struct cec_event_entry *ev = NULL;
u64 ts = ~0ULL;
unsigned int i;
unsigned int ev_idx;
long err = 0;
mutex_lock(&fh->lock);
while (!fh->total_queued_events && block) {
mutex_unlock(&fh->lock);
err = wait_event_interruptible(fh->wait,
fh->total_queued_events);
if (err)
return err;
mutex_lock(&fh->lock);
}
/* Find the oldest event */
for (i = 0; i < CEC_NUM_EVENTS; i++) {
struct cec_event_entry *entry =
list_first_entry_or_null(&fh->events[i],
struct cec_event_entry, list);
if (entry && entry->ev.ts <= ts) {
ev = entry;
ev_idx = i;
ts = ev->ev.ts;
}
}
if (!ev) {
err = -EAGAIN;
goto unlock;
}
list_del(&ev->list);
if (copy_to_user(parg, &ev->ev, sizeof(ev->ev)))
err = -EFAULT;
if (ev_idx >= CEC_NUM_CORE_EVENTS)
kfree(ev);
fh->queued_events[ev_idx]--;
fh->total_queued_events--;
unlock:
mutex_unlock(&fh->lock);
return err;
}
static long cec_g_mode(struct cec_adapter *adap, struct cec_fh *fh,
u32 __user *parg)
{
u32 mode = fh->mode_initiator | fh->mode_follower;
if (copy_to_user(parg, &mode, sizeof(mode)))
return -EFAULT;
return 0;
}
static long cec_s_mode(struct cec_adapter *adap, struct cec_fh *fh,
u32 __user *parg)
{
u32 mode;
u8 mode_initiator;
u8 mode_follower;
bool send_pin_event = false;
long err = 0;
if (copy_from_user(&mode, parg, sizeof(mode)))
return -EFAULT;
if (mode & ~(CEC_MODE_INITIATOR_MSK | CEC_MODE_FOLLOWER_MSK)) {
dprintk(1, "%s: invalid mode bits set\n", __func__);
return -EINVAL;
}
mode_initiator = mode & CEC_MODE_INITIATOR_MSK;
mode_follower = mode & CEC_MODE_FOLLOWER_MSK;
if (mode_initiator > CEC_MODE_EXCL_INITIATOR ||
mode_follower > CEC_MODE_MONITOR_ALL) {
dprintk(1, "%s: unknown mode\n", __func__);
return -EINVAL;
}
if (mode_follower == CEC_MODE_MONITOR_ALL &&
!(adap->capabilities & CEC_CAP_MONITOR_ALL)) {
dprintk(1, "%s: MONITOR_ALL not supported\n", __func__);
return -EINVAL;
}
if (mode_follower == CEC_MODE_MONITOR_PIN &&
!(adap->capabilities & CEC_CAP_MONITOR_PIN)) {
dprintk(1, "%s: MONITOR_PIN not supported\n", __func__);
return -EINVAL;
}
/* Follower modes should always be able to send CEC messages */
if ((mode_initiator == CEC_MODE_NO_INITIATOR ||
!(adap->capabilities & CEC_CAP_TRANSMIT)) &&
mode_follower >= CEC_MODE_FOLLOWER &&
mode_follower <= CEC_MODE_EXCL_FOLLOWER_PASSTHRU) {
dprintk(1, "%s: cannot transmit\n", __func__);
return -EINVAL;
}
/* Monitor modes require CEC_MODE_NO_INITIATOR */
if (mode_initiator && mode_follower >= CEC_MODE_MONITOR_PIN) {
dprintk(1, "%s: monitor modes require NO_INITIATOR\n",
__func__);
return -EINVAL;
}
/* Monitor modes require CAP_NET_ADMIN */
if (mode_follower >= CEC_MODE_MONITOR_PIN && !capable(CAP_NET_ADMIN))
return -EPERM;
mutex_lock(&adap->lock);
/*
* You can't become exclusive follower if someone else already
* has that job.
*/
if ((mode_follower == CEC_MODE_EXCL_FOLLOWER ||
mode_follower == CEC_MODE_EXCL_FOLLOWER_PASSTHRU) &&
adap->cec_follower && adap->cec_follower != fh)
err = -EBUSY;
/*
* You can't become exclusive initiator if someone else already
* has that job.
*/
if (mode_initiator == CEC_MODE_EXCL_INITIATOR &&
adap->cec_initiator && adap->cec_initiator != fh)
err = -EBUSY;
if (!err) {
bool old_mon_all = fh->mode_follower == CEC_MODE_MONITOR_ALL;
bool new_mon_all = mode_follower == CEC_MODE_MONITOR_ALL;
if (old_mon_all != new_mon_all) {
if (new_mon_all)
err = cec_monitor_all_cnt_inc(adap);
else
cec_monitor_all_cnt_dec(adap);
}
}
if (!err) {
bool old_mon_pin = fh->mode_follower == CEC_MODE_MONITOR_PIN;
bool new_mon_pin = mode_follower == CEC_MODE_MONITOR_PIN;
if (old_mon_pin != new_mon_pin) {
send_pin_event = new_mon_pin;
if (new_mon_pin)
err = cec_monitor_pin_cnt_inc(adap);
else
cec_monitor_pin_cnt_dec(adap);
}
}
if (err) {
mutex_unlock(&adap->lock);
return err;
}
if (fh->mode_follower == CEC_MODE_FOLLOWER)
adap->follower_cnt--;
if (mode_follower == CEC_MODE_FOLLOWER)
adap->follower_cnt++;
if (send_pin_event) {
struct cec_event ev = {
.flags = CEC_EVENT_FL_INITIAL_STATE,
};
ev.event = adap->cec_pin_is_high ? CEC_EVENT_PIN_CEC_HIGH :
CEC_EVENT_PIN_CEC_LOW;
cec_queue_event_fh(fh, &ev, 0);
}
if (mode_follower == CEC_MODE_EXCL_FOLLOWER ||
mode_follower == CEC_MODE_EXCL_FOLLOWER_PASSTHRU) {
adap->passthrough =
mode_follower == CEC_MODE_EXCL_FOLLOWER_PASSTHRU;
adap->cec_follower = fh;
} else if (adap->cec_follower == fh) {
adap->passthrough = false;
adap->cec_follower = NULL;
}
if (mode_initiator == CEC_MODE_EXCL_INITIATOR)
adap->cec_initiator = fh;
else if (adap->cec_initiator == fh)
adap->cec_initiator = NULL;
fh->mode_initiator = mode_initiator;
fh->mode_follower = mode_follower;
mutex_unlock(&adap->lock);
return 0;
}
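/*
 * Userspace sketch (illustrative): selecting a monitor mode through
 * CEC_S_MODE, matching the checks above. Monitor modes need
 * CEC_MODE_NO_INITIATOR and CAP_NET_ADMIN:
 *
 *   __u32 mode = CEC_MODE_NO_INITIATOR | CEC_MODE_MONITOR;
 *
 *   if (ioctl(fd, CEC_S_MODE, &mode))
 *       ;  // fails with EPERM without CAP_NET_ADMIN
 */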
static long cec_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
struct cec_fh *fh = filp->private_data;
struct cec_adapter *adap = fh->adap;
bool block = !(filp->f_flags & O_NONBLOCK);
void __user *parg = (void __user *)arg;
if (!cec_is_registered(adap))
return -ENODEV;
switch (cmd) {
case CEC_ADAP_G_CAPS:
return cec_adap_g_caps(adap, parg);
case CEC_ADAP_G_PHYS_ADDR:
return cec_adap_g_phys_addr(adap, parg);
case CEC_ADAP_S_PHYS_ADDR:
return cec_adap_s_phys_addr(adap, fh, block, parg);
case CEC_ADAP_G_LOG_ADDRS:
return cec_adap_g_log_addrs(adap, parg);
case CEC_ADAP_S_LOG_ADDRS:
return cec_adap_s_log_addrs(adap, fh, block, parg);
case CEC_ADAP_G_CONNECTOR_INFO:
return cec_adap_g_connector_info(adap, parg);
case CEC_TRANSMIT:
return cec_transmit(adap, fh, block, parg);
case CEC_RECEIVE:
return cec_receive(adap, fh, block, parg);
case CEC_DQEVENT:
return cec_dqevent(adap, fh, block, parg);
case CEC_G_MODE:
return cec_g_mode(adap, fh, parg);
case CEC_S_MODE:
return cec_s_mode(adap, fh, parg);
default:
return -ENOTTY;
}
}
static int cec_open(struct inode *inode, struct file *filp)
{
struct cec_devnode *devnode =
container_of(inode->i_cdev, struct cec_devnode, cdev);
struct cec_adapter *adap = to_cec_adapter(devnode);
struct cec_fh *fh = kzalloc(sizeof(*fh), GFP_KERNEL);
/*
* Initial events that are automatically sent when the cec device is
* opened.
*/
struct cec_event ev = {
.event = CEC_EVENT_STATE_CHANGE,
.flags = CEC_EVENT_FL_INITIAL_STATE,
};
unsigned int i;
int err;
if (!fh)
return -ENOMEM;
INIT_LIST_HEAD(&fh->msgs);
INIT_LIST_HEAD(&fh->xfer_list);
for (i = 0; i < CEC_NUM_EVENTS; i++)
INIT_LIST_HEAD(&fh->events[i]);
mutex_init(&fh->lock);
init_waitqueue_head(&fh->wait);
fh->mode_initiator = CEC_MODE_INITIATOR;
fh->adap = adap;
err = cec_get_device(devnode);
if (err) {
kfree(fh);
return err;
}
filp->private_data = fh;
/* Queue up initial state events */
ev.state_change.phys_addr = adap->phys_addr;
ev.state_change.log_addr_mask = adap->log_addrs.log_addr_mask;
ev.state_change.have_conn_info =
adap->conn_info.type != CEC_CONNECTOR_TYPE_NO_CONNECTOR;
cec_queue_event_fh(fh, &ev, 0);
#ifdef CONFIG_CEC_PIN
if (adap->pin && adap->pin->ops->read_hpd &&
!adap->devnode.unregistered) {
err = adap->pin->ops->read_hpd(adap);
if (err >= 0) {
ev.event = err ? CEC_EVENT_PIN_HPD_HIGH :
CEC_EVENT_PIN_HPD_LOW;
cec_queue_event_fh(fh, &ev, 0);
}
}
if (adap->pin && adap->pin->ops->read_5v &&
!adap->devnode.unregistered) {
err = adap->pin->ops->read_5v(adap);
if (err >= 0) {
ev.event = err ? CEC_EVENT_PIN_5V_HIGH :
CEC_EVENT_PIN_5V_LOW;
cec_queue_event_fh(fh, &ev, 0);
}
}
#endif
mutex_lock(&devnode->lock);
mutex_lock(&devnode->lock_fhs);
list_add(&fh->list, &devnode->fhs);
mutex_unlock(&devnode->lock_fhs);
mutex_unlock(&devnode->lock);
return 0;
}
/* Override for the release function */
static int cec_release(struct inode *inode, struct file *filp)
{
struct cec_devnode *devnode = cec_devnode_data(filp);
struct cec_adapter *adap = to_cec_adapter(devnode);
struct cec_fh *fh = filp->private_data;
unsigned int i;
mutex_lock(&adap->lock);
if (adap->cec_initiator == fh)
adap->cec_initiator = NULL;
if (adap->cec_follower == fh) {
adap->cec_follower = NULL;
adap->passthrough = false;
}
if (fh->mode_follower == CEC_MODE_FOLLOWER)
adap->follower_cnt--;
if (fh->mode_follower == CEC_MODE_MONITOR_PIN)
cec_monitor_pin_cnt_dec(adap);
if (fh->mode_follower == CEC_MODE_MONITOR_ALL)
cec_monitor_all_cnt_dec(adap);
mutex_unlock(&adap->lock);
mutex_lock(&devnode->lock);
mutex_lock(&devnode->lock_fhs);
list_del(&fh->list);
mutex_unlock(&devnode->lock_fhs);
mutex_unlock(&devnode->lock);
/* Unhook pending transmits from this filehandle. */
mutex_lock(&adap->lock);
while (!list_empty(&fh->xfer_list)) {
struct cec_data *data =
list_first_entry(&fh->xfer_list, struct cec_data, xfer_list);
data->blocking = false;
data->fh = NULL;
list_del_init(&data->xfer_list);
}
mutex_unlock(&adap->lock);
while (!list_empty(&fh->msgs)) {
struct cec_msg_entry *entry =
list_first_entry(&fh->msgs, struct cec_msg_entry, list);
list_del(&entry->list);
kfree(entry);
}
for (i = CEC_NUM_CORE_EVENTS; i < CEC_NUM_EVENTS; i++) {
while (!list_empty(&fh->events[i])) {
struct cec_event_entry *entry =
list_first_entry(&fh->events[i],
struct cec_event_entry, list);
list_del(&entry->list);
kfree(entry);
}
}
kfree(fh);
cec_put_device(devnode);
filp->private_data = NULL;
return 0;
}
const struct file_operations cec_devnode_fops = {
.owner = THIS_MODULE,
.open = cec_open,
.unlocked_ioctl = cec_ioctl,
.compat_ioctl = cec_ioctl,
.release = cec_release,
.poll = cec_poll,
.llseek = no_llseek,
};
| linux-master | drivers/media/cec/core/cec-api.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright 2017 Cisco Systems, Inc. and/or its affiliates. All rights reserved.
*/
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/sched/types.h>
#include <media/cec-pin.h>
#include "cec-pin-priv.h"
/* All timings are in microseconds */
/* start bit timings */
#define CEC_TIM_START_BIT_LOW 3700
#define CEC_TIM_START_BIT_LOW_MIN 3500
#define CEC_TIM_START_BIT_LOW_MAX 3900
#define CEC_TIM_START_BIT_TOTAL 4500
#define CEC_TIM_START_BIT_TOTAL_MIN 4300
#define CEC_TIM_START_BIT_TOTAL_MAX 4700
/* data bit timings */
#define CEC_TIM_DATA_BIT_0_LOW 1500
#define CEC_TIM_DATA_BIT_0_LOW_MIN 1300
#define CEC_TIM_DATA_BIT_0_LOW_MAX 1700
#define CEC_TIM_DATA_BIT_1_LOW 600
#define CEC_TIM_DATA_BIT_1_LOW_MIN 400
#define CEC_TIM_DATA_BIT_1_LOW_MAX 800
#define CEC_TIM_DATA_BIT_TOTAL 2400
#define CEC_TIM_DATA_BIT_TOTAL_MIN 2050
#define CEC_TIM_DATA_BIT_TOTAL_MAX 2750
/* earliest safe time to sample the bit state */
#define CEC_TIM_DATA_BIT_SAMPLE 850
/* earliest time the bit is back to 1 (T7 + 50) */
#define CEC_TIM_DATA_BIT_HIGH 1750
/* when idle, sample once per millisecond */
#define CEC_TIM_IDLE_SAMPLE 1000
/* when processing the start bit, sample twice per millisecond */
#define CEC_TIM_START_BIT_SAMPLE 500
/* when polling for a state change, sample once every 50 microseconds */
#define CEC_TIM_SAMPLE 50
#define CEC_TIM_LOW_DRIVE_ERROR (1.5 * CEC_TIM_DATA_BIT_TOTAL)
/*
* Total data bit time that is too short/long for a valid bit,
* used for error injection.
*/
#define CEC_TIM_DATA_BIT_TOTAL_SHORT 1800
#define CEC_TIM_DATA_BIT_TOTAL_LONG 2900
/*
* Total start bit time that is too short/long for a valid bit,
* used for error injection.
*/
#define CEC_TIM_START_BIT_TOTAL_SHORT 4100
#define CEC_TIM_START_BIT_TOTAL_LONG 5000
/* Data bits are 0-7, EOM is bit 8 and ACK is bit 9 */
#define EOM_BIT 8
#define ACK_BIT 9
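/*
 * Worked example (illustrative): the state machine counts 10 bit slots per
 * transmitted byte, so for a bit position 'pos' the byte index is pos / 10
 * and the slot within the byte is pos % 10. For instance pos 23 is data
 * bit 3 of byte 2, pos 18 is the EOM bit of byte 1 and pos 19 is its ACK
 * bit. The same numbering is used for the <bit> arguments accepted by the
 * error injection code in cec-pin-error-inj.c.
 */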
struct cec_state {
const char * const name;
unsigned int usecs;
};
static const struct cec_state states[CEC_PIN_STATES] = {
{ "Off", 0 },
{ "Idle", CEC_TIM_IDLE_SAMPLE },
{ "Tx Wait", CEC_TIM_SAMPLE },
{ "Tx Wait for High", CEC_TIM_IDLE_SAMPLE },
{ "Tx Start Bit Low", CEC_TIM_START_BIT_LOW },
{ "Tx Start Bit High", CEC_TIM_START_BIT_TOTAL - CEC_TIM_START_BIT_LOW },
{ "Tx Start Bit High Short", CEC_TIM_START_BIT_TOTAL_SHORT - CEC_TIM_START_BIT_LOW },
{ "Tx Start Bit High Long", CEC_TIM_START_BIT_TOTAL_LONG - CEC_TIM_START_BIT_LOW },
{ "Tx Start Bit Low Custom", 0 },
{ "Tx Start Bit High Custom", 0 },
{ "Tx Data 0 Low", CEC_TIM_DATA_BIT_0_LOW },
{ "Tx Data 0 High", CEC_TIM_DATA_BIT_TOTAL - CEC_TIM_DATA_BIT_0_LOW },
{ "Tx Data 0 High Short", CEC_TIM_DATA_BIT_TOTAL_SHORT - CEC_TIM_DATA_BIT_0_LOW },
{ "Tx Data 0 High Long", CEC_TIM_DATA_BIT_TOTAL_LONG - CEC_TIM_DATA_BIT_0_LOW },
{ "Tx Data 1 Low", CEC_TIM_DATA_BIT_1_LOW },
{ "Tx Data 1 High", CEC_TIM_DATA_BIT_TOTAL - CEC_TIM_DATA_BIT_1_LOW },
{ "Tx Data 1 High Short", CEC_TIM_DATA_BIT_TOTAL_SHORT - CEC_TIM_DATA_BIT_1_LOW },
{ "Tx Data 1 High Long", CEC_TIM_DATA_BIT_TOTAL_LONG - CEC_TIM_DATA_BIT_1_LOW },
{ "Tx Data 1 High Pre Sample", CEC_TIM_DATA_BIT_SAMPLE - CEC_TIM_DATA_BIT_1_LOW },
{ "Tx Data 1 High Post Sample", CEC_TIM_DATA_BIT_TOTAL - CEC_TIM_DATA_BIT_SAMPLE },
{ "Tx Data 1 High Post Sample Short", CEC_TIM_DATA_BIT_TOTAL_SHORT - CEC_TIM_DATA_BIT_SAMPLE },
{ "Tx Data 1 High Post Sample Long", CEC_TIM_DATA_BIT_TOTAL_LONG - CEC_TIM_DATA_BIT_SAMPLE },
{ "Tx Data Bit Low Custom", 0 },
{ "Tx Data Bit High Custom", 0 },
{ "Tx Pulse Low Custom", 0 },
{ "Tx Pulse High Custom", 0 },
{ "Tx Low Drive", CEC_TIM_LOW_DRIVE_ERROR },
{ "Rx Start Bit Low", CEC_TIM_SAMPLE },
{ "Rx Start Bit High", CEC_TIM_SAMPLE },
{ "Rx Data Sample", CEC_TIM_DATA_BIT_SAMPLE },
{ "Rx Data Post Sample", CEC_TIM_DATA_BIT_HIGH - CEC_TIM_DATA_BIT_SAMPLE },
{ "Rx Data Wait for Low", CEC_TIM_SAMPLE },
{ "Rx Ack Low", CEC_TIM_DATA_BIT_0_LOW },
{ "Rx Ack Low Post", CEC_TIM_DATA_BIT_HIGH - CEC_TIM_DATA_BIT_0_LOW },
{ "Rx Ack High Post", CEC_TIM_DATA_BIT_HIGH },
{ "Rx Ack Finish", CEC_TIM_DATA_BIT_TOTAL_MIN - CEC_TIM_DATA_BIT_HIGH },
{ "Rx Low Drive", CEC_TIM_LOW_DRIVE_ERROR },
{ "Rx Irq", 0 },
};
static void cec_pin_update(struct cec_pin *pin, bool v, bool force)
{
if (!force && v == pin->adap->cec_pin_is_high)
return;
pin->adap->cec_pin_is_high = v;
if (atomic_read(&pin->work_pin_num_events) < CEC_NUM_PIN_EVENTS) {
u8 ev = v;
if (pin->work_pin_events_dropped) {
pin->work_pin_events_dropped = false;
ev |= CEC_PIN_EVENT_FL_DROPPED;
}
pin->work_pin_events[pin->work_pin_events_wr] = ev;
pin->work_pin_ts[pin->work_pin_events_wr] = ktime_get();
pin->work_pin_events_wr =
(pin->work_pin_events_wr + 1) % CEC_NUM_PIN_EVENTS;
atomic_inc(&pin->work_pin_num_events);
} else {
pin->work_pin_events_dropped = true;
pin->work_pin_events_dropped_cnt++;
}
wake_up_interruptible(&pin->kthread_waitq);
}
static bool cec_pin_read(struct cec_pin *pin)
{
bool v = call_pin_op(pin, read);
cec_pin_update(pin, v, false);
return v;
}
static void cec_pin_low(struct cec_pin *pin)
{
call_void_pin_op(pin, low);
cec_pin_update(pin, false, false);
}
static bool cec_pin_high(struct cec_pin *pin)
{
call_void_pin_op(pin, high);
return cec_pin_read(pin);
}
static bool rx_error_inj(struct cec_pin *pin, unsigned int mode_offset,
int arg_idx, u8 *arg)
{
#ifdef CONFIG_CEC_PIN_ERROR_INJ
u16 cmd = cec_pin_rx_error_inj(pin);
u64 e = pin->error_inj[cmd];
unsigned int mode = (e >> mode_offset) & CEC_ERROR_INJ_MODE_MASK;
if (arg_idx >= 0) {
u8 pos = pin->error_inj_args[cmd][arg_idx];
if (arg)
*arg = pos;
else if (pos != pin->rx_bit)
return false;
}
switch (mode) {
case CEC_ERROR_INJ_MODE_ONCE:
pin->error_inj[cmd] &=
~(CEC_ERROR_INJ_MODE_MASK << mode_offset);
return true;
case CEC_ERROR_INJ_MODE_ALWAYS:
return true;
case CEC_ERROR_INJ_MODE_TOGGLE:
return pin->rx_toggle;
default:
return false;
}
#else
return false;
#endif
}
static bool rx_nack(struct cec_pin *pin)
{
return rx_error_inj(pin, CEC_ERROR_INJ_RX_NACK_OFFSET, -1, NULL);
}
static bool rx_low_drive(struct cec_pin *pin)
{
return rx_error_inj(pin, CEC_ERROR_INJ_RX_LOW_DRIVE_OFFSET,
CEC_ERROR_INJ_RX_LOW_DRIVE_ARG_IDX, NULL);
}
static bool rx_add_byte(struct cec_pin *pin)
{
return rx_error_inj(pin, CEC_ERROR_INJ_RX_ADD_BYTE_OFFSET, -1, NULL);
}
static bool rx_remove_byte(struct cec_pin *pin)
{
return rx_error_inj(pin, CEC_ERROR_INJ_RX_REMOVE_BYTE_OFFSET, -1, NULL);
}
static bool rx_arb_lost(struct cec_pin *pin, u8 *poll)
{
return pin->tx_msg.len == 0 &&
rx_error_inj(pin, CEC_ERROR_INJ_RX_ARB_LOST_OFFSET,
CEC_ERROR_INJ_RX_ARB_LOST_ARG_IDX, poll);
}
static bool tx_error_inj(struct cec_pin *pin, unsigned int mode_offset,
int arg_idx, u8 *arg)
{
#ifdef CONFIG_CEC_PIN_ERROR_INJ
u16 cmd = cec_pin_tx_error_inj(pin);
u64 e = pin->error_inj[cmd];
unsigned int mode = (e >> mode_offset) & CEC_ERROR_INJ_MODE_MASK;
if (arg_idx >= 0) {
u8 pos = pin->error_inj_args[cmd][arg_idx];
if (arg)
*arg = pos;
else if (pos != pin->tx_bit)
return false;
}
switch (mode) {
case CEC_ERROR_INJ_MODE_ONCE:
pin->error_inj[cmd] &=
~(CEC_ERROR_INJ_MODE_MASK << mode_offset);
return true;
case CEC_ERROR_INJ_MODE_ALWAYS:
return true;
case CEC_ERROR_INJ_MODE_TOGGLE:
return pin->tx_toggle;
default:
return false;
}
#else
return false;
#endif
}
static bool tx_no_eom(struct cec_pin *pin)
{
return tx_error_inj(pin, CEC_ERROR_INJ_TX_NO_EOM_OFFSET, -1, NULL);
}
static bool tx_early_eom(struct cec_pin *pin)
{
return tx_error_inj(pin, CEC_ERROR_INJ_TX_EARLY_EOM_OFFSET, -1, NULL);
}
static bool tx_short_bit(struct cec_pin *pin)
{
return tx_error_inj(pin, CEC_ERROR_INJ_TX_SHORT_BIT_OFFSET,
CEC_ERROR_INJ_TX_SHORT_BIT_ARG_IDX, NULL);
}
static bool tx_long_bit(struct cec_pin *pin)
{
return tx_error_inj(pin, CEC_ERROR_INJ_TX_LONG_BIT_OFFSET,
CEC_ERROR_INJ_TX_LONG_BIT_ARG_IDX, NULL);
}
static bool tx_custom_bit(struct cec_pin *pin)
{
return tx_error_inj(pin, CEC_ERROR_INJ_TX_CUSTOM_BIT_OFFSET,
CEC_ERROR_INJ_TX_CUSTOM_BIT_ARG_IDX, NULL);
}
static bool tx_short_start(struct cec_pin *pin)
{
return tx_error_inj(pin, CEC_ERROR_INJ_TX_SHORT_START_OFFSET, -1, NULL);
}
static bool tx_long_start(struct cec_pin *pin)
{
return tx_error_inj(pin, CEC_ERROR_INJ_TX_LONG_START_OFFSET, -1, NULL);
}
static bool tx_custom_start(struct cec_pin *pin)
{
return tx_error_inj(pin, CEC_ERROR_INJ_TX_CUSTOM_START_OFFSET,
-1, NULL);
}
static bool tx_last_bit(struct cec_pin *pin)
{
return tx_error_inj(pin, CEC_ERROR_INJ_TX_LAST_BIT_OFFSET,
CEC_ERROR_INJ_TX_LAST_BIT_ARG_IDX, NULL);
}
static u8 tx_add_bytes(struct cec_pin *pin)
{
u8 bytes;
if (tx_error_inj(pin, CEC_ERROR_INJ_TX_ADD_BYTES_OFFSET,
CEC_ERROR_INJ_TX_ADD_BYTES_ARG_IDX, &bytes))
return bytes;
return 0;
}
static bool tx_remove_byte(struct cec_pin *pin)
{
return tx_error_inj(pin, CEC_ERROR_INJ_TX_REMOVE_BYTE_OFFSET, -1, NULL);
}
static bool tx_low_drive(struct cec_pin *pin)
{
return tx_error_inj(pin, CEC_ERROR_INJ_TX_LOW_DRIVE_OFFSET,
CEC_ERROR_INJ_TX_LOW_DRIVE_ARG_IDX, NULL);
}
static void cec_pin_to_idle(struct cec_pin *pin)
{
/*
* Reset all status fields, release the bus and
* go to idle state.
*/
pin->rx_bit = pin->tx_bit = 0;
pin->rx_msg.len = 0;
memset(pin->rx_msg.msg, 0, sizeof(pin->rx_msg.msg));
pin->ts = ns_to_ktime(0);
pin->tx_generated_poll = false;
pin->tx_post_eom = false;
if (pin->state >= CEC_ST_TX_WAIT &&
pin->state <= CEC_ST_TX_LOW_DRIVE)
pin->tx_toggle ^= 1;
if (pin->state >= CEC_ST_RX_START_BIT_LOW &&
pin->state <= CEC_ST_RX_LOW_DRIVE)
pin->rx_toggle ^= 1;
pin->state = CEC_ST_IDLE;
}
/*
* Handle Transmit-related states
*
* Basic state changes when transmitting:
*
* Idle -> Tx Wait (waiting for the end of signal free time) ->
* Tx Start Bit Low -> Tx Start Bit High ->
*
* Regular data bits + EOM:
* Tx Data 0 Low -> Tx Data 0 High ->
* or:
* Tx Data 1 Low -> Tx Data 1 High ->
*
* First 4 data bits or Ack bit:
* Tx Data 0 Low -> Tx Data 0 High ->
* or:
* Tx Data 1 Low -> Tx Data 1 High -> Tx Data 1 Pre Sample ->
* Tx Data 1 Post Sample ->
*
* After the last Ack go to Idle.
*
* If it detects a Low Drive condition then:
* Tx Wait For High -> Idle
*
* If it loses arbitration, then it switches to state Rx Data Post Sample.
*/
static void cec_pin_tx_states(struct cec_pin *pin, ktime_t ts)
{
bool v;
bool is_ack_bit, ack;
switch (pin->state) {
case CEC_ST_TX_WAIT_FOR_HIGH:
if (cec_pin_read(pin))
cec_pin_to_idle(pin);
break;
case CEC_ST_TX_START_BIT_LOW:
if (tx_short_start(pin)) {
/*
* Error Injection: send an invalid (too short)
* start pulse.
*/
pin->state = CEC_ST_TX_START_BIT_HIGH_SHORT;
} else if (tx_long_start(pin)) {
/*
* Error Injection: send an invalid (too long)
* start pulse.
*/
pin->state = CEC_ST_TX_START_BIT_HIGH_LONG;
} else {
pin->state = CEC_ST_TX_START_BIT_HIGH;
}
/* Generate start bit */
cec_pin_high(pin);
break;
case CEC_ST_TX_START_BIT_LOW_CUSTOM:
pin->state = CEC_ST_TX_START_BIT_HIGH_CUSTOM;
/* Generate start bit */
cec_pin_high(pin);
break;
case CEC_ST_TX_DATA_BIT_1_HIGH_POST_SAMPLE:
case CEC_ST_TX_DATA_BIT_1_HIGH_POST_SAMPLE_SHORT:
case CEC_ST_TX_DATA_BIT_1_HIGH_POST_SAMPLE_LONG:
if (pin->tx_nacked) {
cec_pin_to_idle(pin);
pin->tx_msg.len = 0;
if (pin->tx_generated_poll)
break;
pin->work_tx_ts = ts;
pin->work_tx_status = CEC_TX_STATUS_NACK;
wake_up_interruptible(&pin->kthread_waitq);
break;
}
fallthrough;
case CEC_ST_TX_DATA_BIT_0_HIGH:
case CEC_ST_TX_DATA_BIT_0_HIGH_SHORT:
case CEC_ST_TX_DATA_BIT_0_HIGH_LONG:
case CEC_ST_TX_DATA_BIT_1_HIGH:
case CEC_ST_TX_DATA_BIT_1_HIGH_SHORT:
case CEC_ST_TX_DATA_BIT_1_HIGH_LONG:
/*
* If the read value is 1, then all is OK, otherwise we have a
* low drive condition.
*
* Special case: when we generate a poll message due to an
* Arbitration Lost error injection, then ignore this since
* the pin can actually be low in that case.
*/
if (!cec_pin_read(pin) && !pin->tx_generated_poll) {
/*
* It's 0, so someone detected an error and pulled the
* line low for 1.5 times the nominal bit period.
*/
pin->tx_msg.len = 0;
pin->state = CEC_ST_TX_WAIT_FOR_HIGH;
pin->work_tx_ts = ts;
pin->work_tx_status = CEC_TX_STATUS_LOW_DRIVE;
pin->tx_low_drive_cnt++;
wake_up_interruptible(&pin->kthread_waitq);
break;
}
fallthrough;
case CEC_ST_TX_DATA_BIT_HIGH_CUSTOM:
if (tx_last_bit(pin)) {
/* Error Injection: just stop sending after this bit */
cec_pin_to_idle(pin);
pin->tx_msg.len = 0;
if (pin->tx_generated_poll)
break;
pin->work_tx_ts = ts;
pin->work_tx_status = CEC_TX_STATUS_OK;
wake_up_interruptible(&pin->kthread_waitq);
break;
}
pin->tx_bit++;
fallthrough;
case CEC_ST_TX_START_BIT_HIGH:
case CEC_ST_TX_START_BIT_HIGH_SHORT:
case CEC_ST_TX_START_BIT_HIGH_LONG:
case CEC_ST_TX_START_BIT_HIGH_CUSTOM:
if (tx_low_drive(pin)) {
/* Error injection: go to low drive */
cec_pin_low(pin);
pin->state = CEC_ST_TX_LOW_DRIVE;
pin->tx_msg.len = 0;
if (pin->tx_generated_poll)
break;
pin->work_tx_ts = ts;
pin->work_tx_status = CEC_TX_STATUS_LOW_DRIVE;
pin->tx_low_drive_cnt++;
wake_up_interruptible(&pin->kthread_waitq);
break;
}
if (pin->tx_bit / 10 >= pin->tx_msg.len + pin->tx_extra_bytes) {
cec_pin_to_idle(pin);
pin->tx_msg.len = 0;
if (pin->tx_generated_poll)
break;
pin->work_tx_ts = ts;
pin->work_tx_status = CEC_TX_STATUS_OK;
wake_up_interruptible(&pin->kthread_waitq);
break;
}
switch (pin->tx_bit % 10) {
default: {
/*
* In the CEC_ERROR_INJ_TX_ADD_BYTES case we transmit
* extra bytes, so pin->tx_bit / 10 can become >= 16.
* Generate bit values for those extra bytes instead
* of reading them from the transmit buffer.
*/
unsigned int idx = (pin->tx_bit / 10);
u8 val = idx;
if (idx < pin->tx_msg.len)
val = pin->tx_msg.msg[idx];
v = val & (1 << (7 - (pin->tx_bit % 10)));
pin->state = v ? CEC_ST_TX_DATA_BIT_1_LOW :
CEC_ST_TX_DATA_BIT_0_LOW;
break;
}
case EOM_BIT: {
unsigned int tot_len = pin->tx_msg.len +
pin->tx_extra_bytes;
unsigned int tx_byte_idx = pin->tx_bit / 10;
v = !pin->tx_post_eom && tx_byte_idx == tot_len - 1;
if (tot_len > 1 && tx_byte_idx == tot_len - 2 &&
tx_early_eom(pin)) {
/* Error injection: set EOM one byte early */
v = true;
pin->tx_post_eom = true;
} else if (v && tx_no_eom(pin)) {
/* Error injection: no EOM */
v = false;
}
pin->state = v ? CEC_ST_TX_DATA_BIT_1_LOW :
CEC_ST_TX_DATA_BIT_0_LOW;
break;
}
case ACK_BIT:
pin->state = CEC_ST_TX_DATA_BIT_1_LOW;
break;
}
if (tx_custom_bit(pin))
pin->state = CEC_ST_TX_DATA_BIT_LOW_CUSTOM;
cec_pin_low(pin);
break;
case CEC_ST_TX_DATA_BIT_0_LOW:
case CEC_ST_TX_DATA_BIT_1_LOW:
v = pin->state == CEC_ST_TX_DATA_BIT_1_LOW;
is_ack_bit = pin->tx_bit % 10 == ACK_BIT;
if (v && (pin->tx_bit < 4 || is_ack_bit)) {
pin->state = CEC_ST_TX_DATA_BIT_1_HIGH_PRE_SAMPLE;
} else if (!is_ack_bit && tx_short_bit(pin)) {
/* Error Injection: send an invalid (too short) bit */
pin->state = v ? CEC_ST_TX_DATA_BIT_1_HIGH_SHORT :
CEC_ST_TX_DATA_BIT_0_HIGH_SHORT;
} else if (!is_ack_bit && tx_long_bit(pin)) {
/* Error Injection: send an invalid (too long) bit */
pin->state = v ? CEC_ST_TX_DATA_BIT_1_HIGH_LONG :
CEC_ST_TX_DATA_BIT_0_HIGH_LONG;
} else {
pin->state = v ? CEC_ST_TX_DATA_BIT_1_HIGH :
CEC_ST_TX_DATA_BIT_0_HIGH;
}
cec_pin_high(pin);
break;
case CEC_ST_TX_DATA_BIT_LOW_CUSTOM:
pin->state = CEC_ST_TX_DATA_BIT_HIGH_CUSTOM;
cec_pin_high(pin);
break;
case CEC_ST_TX_DATA_BIT_1_HIGH_PRE_SAMPLE:
/* Read the CEC value at the sample time */
v = cec_pin_read(pin);
is_ack_bit = pin->tx_bit % 10 == ACK_BIT;
/*
* If v == 0 and we're within the first 4 bits
* of the initiator, then someone else started
* transmitting and we lost the arbitration
* (i.e. the logical address of the other
* transmitter has more leading 0 bits in the
* initiator).
*/
if (!v && !is_ack_bit && !pin->tx_generated_poll) {
pin->tx_msg.len = 0;
pin->work_tx_ts = ts;
pin->work_tx_status = CEC_TX_STATUS_ARB_LOST;
wake_up_interruptible(&pin->kthread_waitq);
pin->rx_bit = pin->tx_bit;
pin->tx_bit = 0;
memset(pin->rx_msg.msg, 0, sizeof(pin->rx_msg.msg));
pin->rx_msg.msg[0] = pin->tx_msg.msg[0];
pin->rx_msg.msg[0] &= (0xff << (8 - pin->rx_bit));
pin->rx_msg.len = 0;
pin->ts = ktime_sub_us(ts, CEC_TIM_DATA_BIT_SAMPLE);
pin->state = CEC_ST_RX_DATA_POST_SAMPLE;
pin->rx_bit++;
break;
}
pin->state = CEC_ST_TX_DATA_BIT_1_HIGH_POST_SAMPLE;
if (!is_ack_bit && tx_short_bit(pin)) {
/* Error Injection: send an invalid (too short) bit */
pin->state = CEC_ST_TX_DATA_BIT_1_HIGH_POST_SAMPLE_SHORT;
} else if (!is_ack_bit && tx_long_bit(pin)) {
/* Error Injection: send an invalid (too long) bit */
pin->state = CEC_ST_TX_DATA_BIT_1_HIGH_POST_SAMPLE_LONG;
}
if (!is_ack_bit)
break;
/* Was the message ACKed? */
ack = cec_msg_is_broadcast(&pin->tx_msg) ? v : !v;
if (!ack && (!pin->tx_ignore_nack_until_eom ||
pin->tx_bit / 10 == pin->tx_msg.len - 1) &&
!pin->tx_post_eom) {
/*
* Note: the CEC spec is ambiguous regarding
* what action to take when a NACK appears
* before the last byte of the payload was
* transmitted: either stop transmitting
* immediately, or wait until the last byte
* was transmitted.
*
* Most CEC implementations appear to stop
* immediately, and that's what we do here
* as well.
*/
pin->tx_nacked = true;
}
break;
case CEC_ST_TX_PULSE_LOW_CUSTOM:
cec_pin_high(pin);
pin->state = CEC_ST_TX_PULSE_HIGH_CUSTOM;
break;
case CEC_ST_TX_PULSE_HIGH_CUSTOM:
cec_pin_to_idle(pin);
break;
default:
break;
}
}
/*
* Handle Receive-related states
*
* Basic state changes when receiving:
*
* Rx Start Bit Low -> Rx Start Bit High ->
* Regular data bits + EOM:
* Rx Data Sample -> Rx Data Post Sample -> Rx Data High ->
* Ack bit 0:
* Rx Ack Low -> Rx Ack Low Post -> Rx Data High ->
* Ack bit 1:
* Rx Ack High Post -> Rx Data High ->
* Ack bit 0 && EOM:
* Rx Ack Low -> Rx Ack Low Post -> Rx Ack Finish -> Idle
*/
static void cec_pin_rx_states(struct cec_pin *pin, ktime_t ts)
{
s32 delta;
bool v;
bool ack;
bool bcast, for_us;
u8 dest;
u8 poll;
switch (pin->state) {
/* Receive states */
case CEC_ST_RX_START_BIT_LOW:
v = cec_pin_read(pin);
if (!v)
break;
pin->state = CEC_ST_RX_START_BIT_HIGH;
delta = ktime_us_delta(ts, pin->ts);
/* Start bit low is too short, go back to idle */
if (delta < CEC_TIM_START_BIT_LOW_MIN - CEC_TIM_IDLE_SAMPLE) {
if (!pin->rx_start_bit_low_too_short_cnt++) {
pin->rx_start_bit_low_too_short_ts = ktime_to_ns(pin->ts);
pin->rx_start_bit_low_too_short_delta = delta;
}
cec_pin_to_idle(pin);
break;
}
if (rx_arb_lost(pin, &poll)) {
cec_msg_init(&pin->tx_msg, poll >> 4, poll & 0xf);
pin->tx_generated_poll = true;
pin->tx_extra_bytes = 0;
pin->state = CEC_ST_TX_START_BIT_HIGH;
pin->ts = ts;
}
break;
case CEC_ST_RX_START_BIT_HIGH:
v = cec_pin_read(pin);
delta = ktime_us_delta(ts, pin->ts);
/*
* Unfortunately the spec does not specify when to give up
* and go to idle. We just pick TOTAL_LONG.
*/
if (v && delta > CEC_TIM_START_BIT_TOTAL_LONG) {
pin->rx_start_bit_too_long_cnt++;
cec_pin_to_idle(pin);
break;
}
if (v)
break;
/* Start bit is too short, go back to idle */
if (delta < CEC_TIM_START_BIT_TOTAL_MIN - CEC_TIM_IDLE_SAMPLE) {
if (!pin->rx_start_bit_too_short_cnt++) {
pin->rx_start_bit_too_short_ts = ktime_to_ns(pin->ts);
pin->rx_start_bit_too_short_delta = delta;
}
cec_pin_to_idle(pin);
break;
}
if (rx_low_drive(pin)) {
/* Error injection: go to low drive */
cec_pin_low(pin);
pin->state = CEC_ST_RX_LOW_DRIVE;
pin->rx_low_drive_cnt++;
break;
}
pin->state = CEC_ST_RX_DATA_SAMPLE;
pin->ts = ts;
pin->rx_eom = false;
break;
case CEC_ST_RX_DATA_SAMPLE:
v = cec_pin_read(pin);
pin->state = CEC_ST_RX_DATA_POST_SAMPLE;
switch (pin->rx_bit % 10) {
default:
if (pin->rx_bit / 10 < CEC_MAX_MSG_SIZE)
pin->rx_msg.msg[pin->rx_bit / 10] |=
v << (7 - (pin->rx_bit % 10));
break;
case EOM_BIT:
pin->rx_eom = v;
pin->rx_msg.len = pin->rx_bit / 10 + 1;
break;
case ACK_BIT:
break;
}
pin->rx_bit++;
break;
case CEC_ST_RX_DATA_POST_SAMPLE:
pin->state = CEC_ST_RX_DATA_WAIT_FOR_LOW;
break;
case CEC_ST_RX_DATA_WAIT_FOR_LOW:
v = cec_pin_read(pin);
delta = ktime_us_delta(ts, pin->ts);
/*
* Unfortunately the spec does not specify when to give up
* and go to idle. We just pick TOTAL_LONG.
*/
if (v && delta > CEC_TIM_DATA_BIT_TOTAL_LONG) {
pin->rx_data_bit_too_long_cnt++;
cec_pin_to_idle(pin);
break;
}
if (v)
break;
if (rx_low_drive(pin)) {
/* Error injection: go to low drive */
cec_pin_low(pin);
pin->state = CEC_ST_RX_LOW_DRIVE;
pin->rx_low_drive_cnt++;
break;
}
/*
* Go to low drive state when the total bit time is
* too short.
*/
if (delta < CEC_TIM_DATA_BIT_TOTAL_MIN) {
if (!pin->rx_data_bit_too_short_cnt++) {
pin->rx_data_bit_too_short_ts = ktime_to_ns(pin->ts);
pin->rx_data_bit_too_short_delta = delta;
}
cec_pin_low(pin);
pin->state = CEC_ST_RX_LOW_DRIVE;
pin->rx_low_drive_cnt++;
break;
}
pin->ts = ts;
if (pin->rx_bit % 10 != 9) {
pin->state = CEC_ST_RX_DATA_SAMPLE;
break;
}
dest = cec_msg_destination(&pin->rx_msg);
bcast = dest == CEC_LOG_ADDR_BROADCAST;
/* for_us == broadcast or directed to us */
for_us = bcast || (pin->la_mask & (1 << dest));
/* ACK bit value */
ack = bcast ? 1 : !for_us;
if (for_us && rx_nack(pin)) {
/* Error injection: toggle the ACK bit */
ack = !ack;
}
if (ack) {
/* No need to write to the bus, just wait */
pin->state = CEC_ST_RX_ACK_HIGH_POST;
break;
}
cec_pin_low(pin);
pin->state = CEC_ST_RX_ACK_LOW;
break;
case CEC_ST_RX_ACK_LOW:
cec_pin_high(pin);
pin->state = CEC_ST_RX_ACK_LOW_POST;
break;
case CEC_ST_RX_ACK_LOW_POST:
case CEC_ST_RX_ACK_HIGH_POST:
v = cec_pin_read(pin);
if (v && pin->rx_eom) {
pin->work_rx_msg = pin->rx_msg;
pin->work_rx_msg.rx_ts = ktime_to_ns(ts);
wake_up_interruptible(&pin->kthread_waitq);
pin->ts = ts;
pin->state = CEC_ST_RX_ACK_FINISH;
break;
}
pin->rx_bit++;
pin->state = CEC_ST_RX_DATA_WAIT_FOR_LOW;
break;
case CEC_ST_RX_ACK_FINISH:
cec_pin_to_idle(pin);
break;
default:
break;
}
}
/*
* Main timer function
*/
static enum hrtimer_restart cec_pin_timer(struct hrtimer *timer)
{
struct cec_pin *pin = container_of(timer, struct cec_pin, timer);
struct cec_adapter *adap = pin->adap;
ktime_t ts;
s32 delta;
u32 usecs;
ts = ktime_get();
if (ktime_to_ns(pin->timer_ts)) {
delta = ktime_us_delta(ts, pin->timer_ts);
pin->timer_cnt++;
if (delta > 100 && pin->state != CEC_ST_IDLE) {
/* Keep track of timer overruns */
pin->timer_sum_overrun += delta;
pin->timer_100us_overruns++;
if (delta > 300)
pin->timer_300us_overruns++;
if (delta > pin->timer_max_overrun)
pin->timer_max_overrun = delta;
}
}
if (adap->monitor_pin_cnt)
cec_pin_read(pin);
if (pin->wait_usecs) {
/*
* If we are monitoring the pin, then we have to
* sample at regular intervals.
*/
if (pin->wait_usecs > 150) {
pin->wait_usecs -= 100;
pin->timer_ts = ktime_add_us(ts, 100);
hrtimer_forward_now(timer, ns_to_ktime(100000));
return HRTIMER_RESTART;
}
if (pin->wait_usecs > 100) {
pin->wait_usecs /= 2;
pin->timer_ts = ktime_add_us(ts, pin->wait_usecs);
hrtimer_forward_now(timer,
ns_to_ktime(pin->wait_usecs * 1000));
return HRTIMER_RESTART;
}
pin->timer_ts = ktime_add_us(ts, pin->wait_usecs);
hrtimer_forward_now(timer,
ns_to_ktime(pin->wait_usecs * 1000));
pin->wait_usecs = 0;
return HRTIMER_RESTART;
}
switch (pin->state) {
/* Transmit states */
case CEC_ST_TX_WAIT_FOR_HIGH:
case CEC_ST_TX_START_BIT_LOW:
case CEC_ST_TX_START_BIT_HIGH:
case CEC_ST_TX_START_BIT_HIGH_SHORT:
case CEC_ST_TX_START_BIT_HIGH_LONG:
case CEC_ST_TX_START_BIT_LOW_CUSTOM:
case CEC_ST_TX_START_BIT_HIGH_CUSTOM:
case CEC_ST_TX_DATA_BIT_0_LOW:
case CEC_ST_TX_DATA_BIT_0_HIGH:
case CEC_ST_TX_DATA_BIT_0_HIGH_SHORT:
case CEC_ST_TX_DATA_BIT_0_HIGH_LONG:
case CEC_ST_TX_DATA_BIT_1_LOW:
case CEC_ST_TX_DATA_BIT_1_HIGH:
case CEC_ST_TX_DATA_BIT_1_HIGH_SHORT:
case CEC_ST_TX_DATA_BIT_1_HIGH_LONG:
case CEC_ST_TX_DATA_BIT_1_HIGH_PRE_SAMPLE:
case CEC_ST_TX_DATA_BIT_1_HIGH_POST_SAMPLE:
case CEC_ST_TX_DATA_BIT_1_HIGH_POST_SAMPLE_SHORT:
case CEC_ST_TX_DATA_BIT_1_HIGH_POST_SAMPLE_LONG:
case CEC_ST_TX_DATA_BIT_LOW_CUSTOM:
case CEC_ST_TX_DATA_BIT_HIGH_CUSTOM:
case CEC_ST_TX_PULSE_LOW_CUSTOM:
case CEC_ST_TX_PULSE_HIGH_CUSTOM:
cec_pin_tx_states(pin, ts);
break;
/* Receive states */
case CEC_ST_RX_START_BIT_LOW:
case CEC_ST_RX_START_BIT_HIGH:
case CEC_ST_RX_DATA_SAMPLE:
case CEC_ST_RX_DATA_POST_SAMPLE:
case CEC_ST_RX_DATA_WAIT_FOR_LOW:
case CEC_ST_RX_ACK_LOW:
case CEC_ST_RX_ACK_LOW_POST:
case CEC_ST_RX_ACK_HIGH_POST:
case CEC_ST_RX_ACK_FINISH:
cec_pin_rx_states(pin, ts);
break;
case CEC_ST_IDLE:
case CEC_ST_TX_WAIT:
if (!cec_pin_high(pin)) {
/* Start bit, switch to receive state */
pin->ts = ts;
pin->state = CEC_ST_RX_START_BIT_LOW;
/*
* If a transmit is pending, then that transmit should
* use a signal free time of no more than
* CEC_SIGNAL_FREE_TIME_NEW_INITIATOR since it will
* have a new initiator due to the receive that is now
* starting.
*/
if (pin->tx_msg.len && pin->tx_signal_free_time >
CEC_SIGNAL_FREE_TIME_NEW_INITIATOR)
pin->tx_signal_free_time =
CEC_SIGNAL_FREE_TIME_NEW_INITIATOR;
break;
}
if (ktime_to_ns(pin->ts) == 0)
pin->ts = ts;
if (pin->tx_msg.len) {
/*
* Check if the bus has been free for long enough
* so we can kick off the pending transmit.
*/
delta = ktime_us_delta(ts, pin->ts);
if (delta / CEC_TIM_DATA_BIT_TOTAL >=
pin->tx_signal_free_time) {
pin->tx_nacked = false;
if (tx_custom_start(pin))
pin->state = CEC_ST_TX_START_BIT_LOW_CUSTOM;
else
pin->state = CEC_ST_TX_START_BIT_LOW;
/* Generate start bit */
cec_pin_low(pin);
break;
}
if (delta / CEC_TIM_DATA_BIT_TOTAL >=
pin->tx_signal_free_time - 1)
pin->state = CEC_ST_TX_WAIT;
break;
}
if (pin->tx_custom_pulse && pin->state == CEC_ST_IDLE) {
pin->tx_custom_pulse = false;
/* Generate custom pulse */
cec_pin_low(pin);
pin->state = CEC_ST_TX_PULSE_LOW_CUSTOM;
break;
}
if (pin->state != CEC_ST_IDLE || pin->ops->enable_irq == NULL ||
pin->enable_irq_failed || adap->is_configuring ||
adap->is_configured || adap->monitor_all_cnt || !adap->monitor_pin_cnt)
break;
/* Switch to interrupt mode */
atomic_set(&pin->work_irq_change, CEC_PIN_IRQ_ENABLE);
pin->state = CEC_ST_RX_IRQ;
wake_up_interruptible(&pin->kthread_waitq);
return HRTIMER_NORESTART;
case CEC_ST_TX_LOW_DRIVE:
case CEC_ST_RX_LOW_DRIVE:
cec_pin_high(pin);
cec_pin_to_idle(pin);
break;
default:
break;
}
switch (pin->state) {
case CEC_ST_TX_START_BIT_LOW_CUSTOM:
case CEC_ST_TX_DATA_BIT_LOW_CUSTOM:
case CEC_ST_TX_PULSE_LOW_CUSTOM:
usecs = pin->tx_custom_low_usecs;
break;
case CEC_ST_TX_START_BIT_HIGH_CUSTOM:
case CEC_ST_TX_DATA_BIT_HIGH_CUSTOM:
case CEC_ST_TX_PULSE_HIGH_CUSTOM:
usecs = pin->tx_custom_high_usecs;
break;
default:
usecs = states[pin->state].usecs;
break;
}
if (!adap->monitor_pin_cnt || usecs <= 150) {
pin->wait_usecs = 0;
pin->timer_ts = ktime_add_us(ts, usecs);
hrtimer_forward_now(timer,
ns_to_ktime(usecs * 1000));
return HRTIMER_RESTART;
}
pin->wait_usecs = usecs - 100;
pin->timer_ts = ktime_add_us(ts, 100);
hrtimer_forward_now(timer, ns_to_ktime(100000));
return HRTIMER_RESTART;
}
static int cec_pin_thread_func(void *_adap)
{
struct cec_adapter *adap = _adap;
struct cec_pin *pin = adap->pin;
pin->enabled_irq = false;
pin->enable_irq_failed = false;
for (;;) {
wait_event_interruptible(pin->kthread_waitq,
kthread_should_stop() ||
pin->work_rx_msg.len ||
pin->work_tx_status ||
atomic_read(&pin->work_irq_change) ||
atomic_read(&pin->work_pin_num_events));
if (kthread_should_stop())
break;
if (pin->work_rx_msg.len) {
struct cec_msg *msg = &pin->work_rx_msg;
if (msg->len > 1 && msg->len < CEC_MAX_MSG_SIZE &&
rx_add_byte(pin)) {
/* Error injection: add byte to the message */
msg->msg[msg->len++] = 0x55;
}
if (msg->len > 2 && rx_remove_byte(pin)) {
/* Error injection: remove byte from message */
msg->len--;
}
if (msg->len > CEC_MAX_MSG_SIZE)
msg->len = CEC_MAX_MSG_SIZE;
cec_received_msg_ts(adap, msg,
ns_to_ktime(pin->work_rx_msg.rx_ts));
msg->len = 0;
}
if (pin->work_tx_status) {
unsigned int tx_status = pin->work_tx_status;
pin->work_tx_status = 0;
cec_transmit_attempt_done_ts(adap, tx_status,
pin->work_tx_ts);
}
while (atomic_read(&pin->work_pin_num_events)) {
unsigned int idx = pin->work_pin_events_rd;
u8 v = pin->work_pin_events[idx];
cec_queue_pin_cec_event(adap,
v & CEC_PIN_EVENT_FL_IS_HIGH,
v & CEC_PIN_EVENT_FL_DROPPED,
pin->work_pin_ts[idx]);
pin->work_pin_events_rd = (idx + 1) % CEC_NUM_PIN_EVENTS;
atomic_dec(&pin->work_pin_num_events);
}
switch (atomic_xchg(&pin->work_irq_change,
CEC_PIN_IRQ_UNCHANGED)) {
case CEC_PIN_IRQ_DISABLE:
if (pin->enabled_irq) {
pin->ops->disable_irq(adap);
pin->enabled_irq = false;
pin->enable_irq_failed = false;
}
cec_pin_high(pin);
if (pin->state == CEC_ST_OFF)
break;
cec_pin_to_idle(pin);
hrtimer_start(&pin->timer, ns_to_ktime(0),
HRTIMER_MODE_REL);
break;
case CEC_PIN_IRQ_ENABLE:
if (pin->enabled_irq || !pin->ops->enable_irq ||
pin->adap->devnode.unregistered)
break;
pin->enable_irq_failed = !pin->ops->enable_irq(adap);
if (pin->enable_irq_failed) {
cec_pin_to_idle(pin);
hrtimer_start(&pin->timer, ns_to_ktime(0),
HRTIMER_MODE_REL);
} else {
pin->enabled_irq = true;
}
break;
default:
break;
}
}
if (pin->enabled_irq) {
pin->ops->disable_irq(pin->adap);
pin->enabled_irq = false;
pin->enable_irq_failed = false;
cec_pin_high(pin);
}
return 0;
}
static int cec_pin_adap_enable(struct cec_adapter *adap, bool enable)
{
struct cec_pin *pin = adap->pin;
if (enable) {
cec_pin_read(pin);
cec_pin_to_idle(pin);
pin->tx_msg.len = 0;
pin->timer_ts = ns_to_ktime(0);
atomic_set(&pin->work_irq_change, CEC_PIN_IRQ_UNCHANGED);
if (!pin->kthread) {
pin->kthread = kthread_run(cec_pin_thread_func, adap,
"cec-pin");
if (IS_ERR(pin->kthread)) {
int err = PTR_ERR(pin->kthread);
pr_err("cec-pin: kernel_thread() failed\n");
pin->kthread = NULL;
return err;
}
}
hrtimer_start(&pin->timer, ns_to_ktime(0),
HRTIMER_MODE_REL);
} else if (pin->kthread) {
hrtimer_cancel(&pin->timer);
cec_pin_high(pin);
cec_pin_to_idle(pin);
pin->state = CEC_ST_OFF;
pin->work_tx_status = 0;
atomic_set(&pin->work_irq_change, CEC_PIN_IRQ_DISABLE);
wake_up_interruptible(&pin->kthread_waitq);
}
return 0;
}
static int cec_pin_adap_log_addr(struct cec_adapter *adap, u8 log_addr)
{
struct cec_pin *pin = adap->pin;
if (log_addr == CEC_LOG_ADDR_INVALID)
pin->la_mask = 0;
else
pin->la_mask |= (1 << log_addr);
return 0;
}
void cec_pin_start_timer(struct cec_pin *pin)
{
if (pin->state != CEC_ST_RX_IRQ)
return;
atomic_set(&pin->work_irq_change, CEC_PIN_IRQ_DISABLE);
wake_up_interruptible(&pin->kthread_waitq);
}
static int cec_pin_adap_transmit(struct cec_adapter *adap, u8 attempts,
u32 signal_free_time, struct cec_msg *msg)
{
struct cec_pin *pin = adap->pin;
/*
* If a receive is in progress, then this transmit should use
* a signal free time of max CEC_SIGNAL_FREE_TIME_NEW_INITIATOR
* since when it starts transmitting it will have a new initiator.
*/
if (pin->state != CEC_ST_IDLE &&
signal_free_time > CEC_SIGNAL_FREE_TIME_NEW_INITIATOR)
signal_free_time = CEC_SIGNAL_FREE_TIME_NEW_INITIATOR;
pin->tx_signal_free_time = signal_free_time;
pin->tx_extra_bytes = 0;
pin->tx_msg = *msg;
if (msg->len > 1) {
/* Error injection: add byte to the message */
pin->tx_extra_bytes = tx_add_bytes(pin);
}
if (msg->len > 2 && tx_remove_byte(pin)) {
/* Error injection: remove byte from the message */
pin->tx_msg.len--;
}
pin->work_tx_status = 0;
pin->tx_bit = 0;
cec_pin_start_timer(pin);
return 0;
}
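/*
 * Dump the state machine status and the timing/error counters to the
 * adapter status file, reset the counters, then let the low-level driver
 * append its own status.
 */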
static void cec_pin_adap_status(struct cec_adapter *adap,
struct seq_file *file)
{
struct cec_pin *pin = adap->pin;
seq_printf(file, "state: %s\n", states[pin->state].name);
seq_printf(file, "tx_bit: %d\n", pin->tx_bit);
seq_printf(file, "rx_bit: %d\n", pin->rx_bit);
seq_printf(file, "cec pin: %d\n", call_pin_op(pin, read));
seq_printf(file, "cec pin events dropped: %u\n",
pin->work_pin_events_dropped_cnt);
if (pin->ops->enable_irq)
seq_printf(file, "irq %s\n", pin->enabled_irq ? "enabled" :
(pin->enable_irq_failed ? "failed" : "disabled"));
if (pin->timer_100us_overruns) {
seq_printf(file, "timer overruns > 100us: %u of %u\n",
pin->timer_100us_overruns, pin->timer_cnt);
seq_printf(file, "timer overruns > 300us: %u of %u\n",
pin->timer_300us_overruns, pin->timer_cnt);
seq_printf(file, "max timer overrun: %u usecs\n",
pin->timer_max_overrun);
seq_printf(file, "avg timer overrun: %u usecs\n",
pin->timer_sum_overrun / pin->timer_100us_overruns);
}
if (pin->rx_start_bit_low_too_short_cnt)
seq_printf(file,
"rx start bit low too short: %u (delta %u, ts %llu)\n",
pin->rx_start_bit_low_too_short_cnt,
pin->rx_start_bit_low_too_short_delta,
pin->rx_start_bit_low_too_short_ts);
if (pin->rx_start_bit_too_short_cnt)
seq_printf(file,
"rx start bit too short: %u (delta %u, ts %llu)\n",
pin->rx_start_bit_too_short_cnt,
pin->rx_start_bit_too_short_delta,
pin->rx_start_bit_too_short_ts);
if (pin->rx_start_bit_too_long_cnt)
seq_printf(file, "rx start bit too long: %u\n",
pin->rx_start_bit_too_long_cnt);
if (pin->rx_data_bit_too_short_cnt)
seq_printf(file,
"rx data bit too short: %u (delta %u, ts %llu)\n",
pin->rx_data_bit_too_short_cnt,
pin->rx_data_bit_too_short_delta,
pin->rx_data_bit_too_short_ts);
if (pin->rx_data_bit_too_long_cnt)
seq_printf(file, "rx data bit too long: %u\n",
pin->rx_data_bit_too_long_cnt);
seq_printf(file, "rx initiated low drive: %u\n", pin->rx_low_drive_cnt);
seq_printf(file, "tx detected low drive: %u\n", pin->tx_low_drive_cnt);
pin->work_pin_events_dropped_cnt = 0;
pin->timer_cnt = 0;
pin->timer_100us_overruns = 0;
pin->timer_300us_overruns = 0;
pin->timer_max_overrun = 0;
pin->timer_sum_overrun = 0;
pin->rx_start_bit_low_too_short_cnt = 0;
pin->rx_start_bit_too_short_cnt = 0;
pin->rx_start_bit_too_long_cnt = 0;
pin->rx_data_bit_too_short_cnt = 0;
pin->rx_data_bit_too_long_cnt = 0;
pin->rx_low_drive_cnt = 0;
pin->tx_low_drive_cnt = 0;
call_void_pin_op(pin, status, file);
}
static int cec_pin_adap_monitor_all_enable(struct cec_adapter *adap,
bool enable)
{
struct cec_pin *pin = adap->pin;
pin->monitor_all = enable;
return 0;
}
static void cec_pin_adap_free(struct cec_adapter *adap)
{
struct cec_pin *pin = adap->pin;
if (pin->kthread)
kthread_stop(pin->kthread);
pin->kthread = NULL;
if (pin->ops->free)
pin->ops->free(adap);
adap->pin = NULL;
kfree(pin);
}
static int cec_pin_received(struct cec_adapter *adap, struct cec_msg *msg)
{
struct cec_pin *pin = adap->pin;
if (pin->ops->received && !adap->devnode.unregistered)
return pin->ops->received(adap, msg);
return -ENOMSG;
}
void cec_pin_changed(struct cec_adapter *adap, bool value)
{
struct cec_pin *pin = adap->pin;
cec_pin_update(pin, value, false);
if (!value && (adap->is_configuring || adap->is_configured ||
adap->monitor_all_cnt || !adap->monitor_pin_cnt))
atomic_set(&pin->work_irq_change, CEC_PIN_IRQ_DISABLE);
}
EXPORT_SYMBOL_GPL(cec_pin_changed);
static const struct cec_adap_ops cec_pin_adap_ops = {
.adap_enable = cec_pin_adap_enable,
.adap_monitor_all_enable = cec_pin_adap_monitor_all_enable,
.adap_log_addr = cec_pin_adap_log_addr,
.adap_transmit = cec_pin_adap_transmit,
.adap_status = cec_pin_adap_status,
.adap_free = cec_pin_adap_free,
#ifdef CONFIG_CEC_PIN_ERROR_INJ
.error_inj_parse_line = cec_pin_error_inj_parse_line,
.error_inj_show = cec_pin_error_inj_show,
#endif
.received = cec_pin_received,
};
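/*
 * Allocate a CEC adapter driven entirely by the generic pin framework.
 * A minimal usage sketch from a low-level pin driver is shown below;
 * "my_pin_ops", "priv" and the adapter name are illustrative placeholders,
 * not part of this file:
 *
 *	adap = cec_pin_allocate_adapter(&my_pin_ops, priv, "my-cec-pin",
 *					CEC_CAP_DEFAULTS);
 *	if (IS_ERR(adap))
 *		return PTR_ERR(adap);
 *	err = cec_register_adapter(adap, dev);
 */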
struct cec_adapter *cec_pin_allocate_adapter(const struct cec_pin_ops *pin_ops,
void *priv, const char *name, u32 caps)
{
struct cec_adapter *adap;
struct cec_pin *pin = kzalloc(sizeof(*pin), GFP_KERNEL);
if (pin == NULL)
return ERR_PTR(-ENOMEM);
pin->ops = pin_ops;
hrtimer_init(&pin->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
atomic_set(&pin->work_pin_num_events, 0);
pin->timer.function = cec_pin_timer;
init_waitqueue_head(&pin->kthread_waitq);
pin->tx_custom_low_usecs = CEC_TIM_CUSTOM_DEFAULT;
pin->tx_custom_high_usecs = CEC_TIM_CUSTOM_DEFAULT;
adap = cec_allocate_adapter(&cec_pin_adap_ops, priv, name,
caps | CEC_CAP_MONITOR_ALL | CEC_CAP_MONITOR_PIN,
CEC_MAX_LOG_ADDRS);
if (IS_ERR(adap)) {
kfree(pin);
return adap;
}
adap->pin = pin;
pin->adap = adap;
cec_pin_update(pin, cec_pin_high(pin), true);
return adap;
}
EXPORT_SYMBOL_GPL(cec_pin_allocate_adapter);
| linux-master | drivers/media/cec/core/cec-pin.c |
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
* CEC driver for SECO X86 Boards
*
* Author: Ettore Chimenti <[email protected]>
* Copyright (C) 2018, SECO SpA.
* Copyright (C) 2018, Aidilab Srl.
*/
#include <linux/module.h>
#include <linux/acpi.h>
#include <linux/delay.h>
#include <linux/dmi.h>
#include <linux/gpio/consumer.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
/* CEC Framework */
#include <media/cec-notifier.h>
#include "seco-cec.h"
struct secocec_data {
struct device *dev;
struct platform_device *pdev;
struct cec_adapter *cec_adap;
struct cec_notifier *notifier;
struct rc_dev *ir;
char ir_input_phys[32];
int irq;
};
#define smb_wr16(cmd, data) smb_word_op(SECOCEC_MICRO_ADDRESS, \
cmd, data, SMBUS_WRITE, NULL)
#define smb_rd16(cmd, res) smb_word_op(SECOCEC_MICRO_ADDRESS, \
cmd, 0, SMBUS_READ, res)
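/*
 * Perform a single polled SMBus word read or write towards the board
 * microcontroller through the SMBus host controller I/O ports, busy-waiting
 * for the controller to become free and for the transaction to complete.
 */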
static int smb_word_op(u16 slave_addr, u8 cmd, u16 data,
u8 operation, u16 *result)
{
unsigned int count;
int status = 0;
/* Active wait until ready */
for (count = 0; count <= SMBTIMEOUT; ++count) {
if (!(inb(HSTS) & BRA_INUSE_STS))
break;
udelay(SMB_POLL_UDELAY);
}
if (count > SMBTIMEOUT)
/* Reset the lock instead of failing */
outb(0xff, HSTS);
outb(0x00, HCNT);
outb((u8)(slave_addr & 0xfe) | operation, XMIT_SLVA);
outb(cmd, HCMD);
inb(HCNT);
if (operation == SMBUS_WRITE) {
outb((u8)data, HDAT0);
outb((u8)(data >> 8), HDAT1);
}
outb(BRA_START + BRA_SMB_CMD_WORD_DATA, HCNT);
for (count = 0; count <= SMBTIMEOUT; count++) {
if (!(inb(HSTS) & BRA_HOST_BUSY))
break;
udelay(SMB_POLL_UDELAY);
}
if (count > SMBTIMEOUT) {
status = -EBUSY;
goto err;
}
if (inb(HSTS) & BRA_HSTS_ERR_MASK) {
status = -EIO;
goto err;
}
if (operation == SMBUS_READ)
*result = ((inb(HDAT0) & 0xff) + ((inb(HDAT1) & 0xff) << 8));
err:
outb(0xff, HSTS);
return status;
}
static int secocec_adap_enable(struct cec_adapter *adap, bool enable)
{
struct secocec_data *cec = cec_get_drvdata(adap);
struct device *dev = cec->dev;
u16 val = 0;
int status;
if (enable) {
/* Clear the status register */
status = smb_rd16(SECOCEC_STATUS_REG_1, &val);
if (status)
goto err;
status = smb_wr16(SECOCEC_STATUS_REG_1, val);
if (status)
goto err;
/* Enable the interrupts */
status = smb_rd16(SECOCEC_ENABLE_REG_1, &val);
if (status)
goto err;
status = smb_wr16(SECOCEC_ENABLE_REG_1,
val | SECOCEC_ENABLE_REG_1_CEC);
if (status)
goto err;
dev_dbg(dev, "Device enabled\n");
} else {
/* Clear the status register */
status = smb_rd16(SECOCEC_STATUS_REG_1, &val);
status = smb_wr16(SECOCEC_STATUS_REG_1, val);
/* Disable the interrupts */
status = smb_rd16(SECOCEC_ENABLE_REG_1, &val);
status = smb_wr16(SECOCEC_ENABLE_REG_1, val &
~SECOCEC_ENABLE_REG_1_CEC &
~SECOCEC_ENABLE_REG_1_IR);
dev_dbg(dev, "Device disabled\n");
}
return 0;
err:
return status;
}
static int secocec_adap_log_addr(struct cec_adapter *adap, u8 logical_addr)
{
u16 enable_val = 0;
int status;
/* Disable device */
status = smb_rd16(SECOCEC_ENABLE_REG_1, &enable_val);
if (status)
return status;
status = smb_wr16(SECOCEC_ENABLE_REG_1,
enable_val & ~SECOCEC_ENABLE_REG_1_CEC);
if (status)
return status;
/* Write logical address
* NOTE: CEC_LOG_ADDR_INVALID is mapped to the 'Unregistered' LA
*/
status = smb_wr16(SECOCEC_DEVICE_LA, logical_addr & 0xf);
if (status)
return status;
/* Re-enable device */
status = smb_wr16(SECOCEC_ENABLE_REG_1,
enable_val | SECOCEC_ENABLE_REG_1_CEC);
if (status)
return status;
return 0;
}
static int secocec_adap_transmit(struct cec_adapter *adap, u8 attempts,
u32 signal_free_time, struct cec_msg *msg)
{
u16 payload_len, payload_id_len, destination, val = 0;
u8 *payload_msg;
int status;
u8 i;
/* Device msg len already accounts for header */
payload_id_len = msg->len - 1;
/* Send data length */
status = smb_wr16(SECOCEC_WRITE_DATA_LENGTH, payload_id_len);
if (status)
goto err;
/* Send Operation ID if present */
if (payload_id_len > 0) {
status = smb_wr16(SECOCEC_WRITE_OPERATION_ID, msg->msg[1]);
if (status)
goto err;
}
/* Send data if present */
if (payload_id_len > 1) {
		/* Payload bytes only: skip the header and opcode */
payload_len = msg->len - 2;
payload_msg = &msg->msg[2];
/* Copy message into registers */
for (i = 0; i < payload_len; i += 2) {
/* hi byte */
val = payload_msg[i + 1] << 8;
/* lo byte */
val |= payload_msg[i];
status = smb_wr16(SECOCEC_WRITE_DATA_00 + i / 2, val);
if (status)
goto err;
}
}
/* Send msg source/destination and fire msg */
destination = msg->msg[0];
status = smb_wr16(SECOCEC_WRITE_BYTE0, destination);
if (status)
goto err;
return 0;
err:
return status;
}
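/* Report the transmit outcome to the CEC core and clear the TX status bits */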
static void secocec_tx_done(struct cec_adapter *adap, u16 status_val)
{
if (status_val & SECOCEC_STATUS_TX_ERROR_MASK) {
if (status_val & SECOCEC_STATUS_TX_NACK_ERROR)
cec_transmit_attempt_done(adap, CEC_TX_STATUS_NACK);
else
cec_transmit_attempt_done(adap, CEC_TX_STATUS_ERROR);
} else {
cec_transmit_attempt_done(adap, CEC_TX_STATUS_OK);
}
/* Reset status reg */
status_val = SECOCEC_STATUS_TX_ERROR_MASK |
SECOCEC_STATUS_MSG_SENT_MASK |
SECOCEC_STATUS_TX_NACK_ERROR;
smb_wr16(SECOCEC_STATUS, status_val);
}
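/*
 * Read a received message from the microcontroller registers, hand it to
 * the CEC core and acknowledge the RX status bits (or the error bits on
 * a corrupted message).
 */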
static void secocec_rx_done(struct cec_adapter *adap, u16 status_val)
{
struct secocec_data *cec = cec_get_drvdata(adap);
struct device *dev = cec->dev;
struct cec_msg msg = { };
bool flag_overflow = false;
u8 payload_len, i = 0;
u8 *payload_msg;
u16 val = 0;
int status;
if (status_val & SECOCEC_STATUS_RX_OVERFLOW_MASK) {
/* NOTE: Untested, it also might not be necessary */
dev_warn(dev, "Received more than 16 bytes. Discarding\n");
flag_overflow = true;
}
if (status_val & SECOCEC_STATUS_RX_ERROR_MASK) {
dev_warn(dev, "Message received with errors. Discarding\n");
status = -EIO;
goto rxerr;
}
/* Read message length */
status = smb_rd16(SECOCEC_READ_DATA_LENGTH, &val);
if (status)
return;
/* Device msg len already accounts for the header */
msg.len = min(val + 1, CEC_MAX_MSG_SIZE);
/* Read logical address */
status = smb_rd16(SECOCEC_READ_BYTE0, &val);
if (status)
return;
/* device stores source LA and destination */
msg.msg[0] = val;
/* Read operation ID */
status = smb_rd16(SECOCEC_READ_OPERATION_ID, &val);
if (status)
return;
msg.msg[1] = val;
/* Read data if present */
if (msg.len > 1) {
payload_len = msg.len - 2;
payload_msg = &msg.msg[2];
/* device stores 2 bytes in every 16-bit val */
for (i = 0; i < payload_len; i += 2) {
status = smb_rd16(SECOCEC_READ_DATA_00 + i / 2, &val);
if (status)
return;
/* low byte, skipping header */
payload_msg[i] = val & 0x00ff;
/* hi byte */
payload_msg[i + 1] = (val & 0xff00) >> 8;
}
}
cec_received_msg(cec->cec_adap, &msg);
/* Reset status reg */
status_val = SECOCEC_STATUS_MSG_RECEIVED_MASK;
if (flag_overflow)
status_val |= SECOCEC_STATUS_RX_OVERFLOW_MASK;
status = smb_wr16(SECOCEC_STATUS, status_val);
return;
rxerr:
/* Reset error reg */
status_val = SECOCEC_STATUS_MSG_RECEIVED_MASK |
SECOCEC_STATUS_RX_ERROR_MASK;
if (flag_overflow)
status_val |= SECOCEC_STATUS_RX_OVERFLOW_MASK;
smb_wr16(SECOCEC_STATUS, status_val);
}
static const struct cec_adap_ops secocec_cec_adap_ops = {
/* Low-level callbacks */
.adap_enable = secocec_adap_enable,
.adap_log_addr = secocec_adap_log_addr,
.adap_transmit = secocec_adap_transmit,
};
#ifdef CONFIG_CEC_SECO_RC
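/*
 * Register an RC5 remote control input device and enable the IR interrupt
 * in the microcontroller; decoded key presses are reported by
 * secocec_ir_rx() from the interrupt handler.
 */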
static int secocec_ir_probe(void *priv)
{
struct secocec_data *cec = priv;
struct device *dev = cec->dev;
int status;
u16 val;
/* Prepare the RC input device */
cec->ir = devm_rc_allocate_device(dev, RC_DRIVER_SCANCODE);
if (!cec->ir)
return -ENOMEM;
snprintf(cec->ir_input_phys, sizeof(cec->ir_input_phys),
"%s/input0", dev_name(dev));
cec->ir->device_name = dev_name(dev);
cec->ir->input_phys = cec->ir_input_phys;
cec->ir->input_id.bustype = BUS_HOST;
cec->ir->input_id.vendor = 0;
cec->ir->input_id.product = 0;
cec->ir->input_id.version = 1;
cec->ir->driver_name = SECOCEC_DEV_NAME;
cec->ir->allowed_protocols = RC_PROTO_BIT_RC5;
cec->ir->priv = cec;
cec->ir->map_name = RC_MAP_HAUPPAUGE;
cec->ir->timeout = MS_TO_US(100);
/* Clear the status register */
status = smb_rd16(SECOCEC_STATUS_REG_1, &val);
if (status != 0)
goto err;
status = smb_wr16(SECOCEC_STATUS_REG_1, val);
if (status != 0)
goto err;
/* Enable the interrupts */
status = smb_rd16(SECOCEC_ENABLE_REG_1, &val);
if (status != 0)
goto err;
status = smb_wr16(SECOCEC_ENABLE_REG_1,
val | SECOCEC_ENABLE_REG_1_IR);
if (status != 0)
goto err;
dev_dbg(dev, "IR enabled\n");
status = devm_rc_register_device(dev, cec->ir);
if (status) {
dev_err(dev, "Failed to prepare input device\n");
cec->ir = NULL;
goto err;
}
return 0;
err:
smb_rd16(SECOCEC_ENABLE_REG_1, &val);
smb_wr16(SECOCEC_ENABLE_REG_1,
val & ~SECOCEC_ENABLE_REG_1_IR);
dev_dbg(dev, "IR disabled\n");
return status;
}
static int secocec_ir_rx(struct secocec_data *priv)
{
struct secocec_data *cec = priv;
struct device *dev = cec->dev;
u16 val, status, key, addr, toggle;
if (!cec->ir)
return -ENODEV;
status = smb_rd16(SECOCEC_IR_READ_DATA, &val);
if (status != 0)
goto err;
key = val & SECOCEC_IR_COMMAND_MASK;
addr = (val & SECOCEC_IR_ADDRESS_MASK) >> SECOCEC_IR_ADDRESS_SHL;
toggle = (val & SECOCEC_IR_TOGGLE_MASK) >> SECOCEC_IR_TOGGLE_SHL;
rc_keydown(cec->ir, RC_PROTO_RC5, RC_SCANCODE_RC5(addr, key), toggle);
dev_dbg(dev, "IR key pressed: 0x%02x addr 0x%02x toggle 0x%02x\n", key,
addr, toggle);
return 0;
err:
dev_err(dev, "IR Receive message failed (%d)\n", status);
return -EIO;
}
#else
static void secocec_ir_rx(struct secocec_data *priv)
{
}
static int secocec_ir_probe(void *priv)
{
return 0;
}
#endif
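/*
 * Threaded interrupt handler: read the global status register, dispatch to
 * the CEC RX/TX completion handlers and/or the IR decoder, then acknowledge
 * the serviced status bits.
 */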
static irqreturn_t secocec_irq_handler(int irq, void *priv)
{
struct secocec_data *cec = priv;
struct device *dev = cec->dev;
u16 status_val, cec_val, val = 0;
int status;
/* Read status register */
status = smb_rd16(SECOCEC_STATUS_REG_1, &status_val);
if (status)
goto err;
if (status_val & SECOCEC_STATUS_REG_1_CEC) {
/* Read CEC status register */
status = smb_rd16(SECOCEC_STATUS, &cec_val);
if (status)
goto err;
if (cec_val & SECOCEC_STATUS_MSG_RECEIVED_MASK)
secocec_rx_done(cec->cec_adap, cec_val);
if (cec_val & SECOCEC_STATUS_MSG_SENT_MASK)
secocec_tx_done(cec->cec_adap, cec_val);
if ((~cec_val & SECOCEC_STATUS_MSG_SENT_MASK) &&
(~cec_val & SECOCEC_STATUS_MSG_RECEIVED_MASK))
dev_warn_once(dev,
"Message not received or sent, but interrupt fired");
val = SECOCEC_STATUS_REG_1_CEC;
}
if (status_val & SECOCEC_STATUS_REG_1_IR) {
val |= SECOCEC_STATUS_REG_1_IR;
secocec_ir_rx(cec);
}
/* Reset status register */
status = smb_wr16(SECOCEC_STATUS_REG_1, val);
if (status)
goto err;
return IRQ_HANDLED;
err:
dev_err_once(dev, "IRQ: R/W SMBus operation failed %d\n", status);
/* Reset status register */
val = SECOCEC_STATUS_REG_1_CEC | SECOCEC_STATUS_REG_1_IR;
smb_wr16(SECOCEC_STATUS_REG_1, val);
return IRQ_HANDLED;
}
struct cec_dmi_match {
const char *sys_vendor;
const char *product_name;
const char *devname;
const char *conn;
};
static const struct cec_dmi_match secocec_dmi_match_table[] = {
/* UDOO X86 */
{ "SECO", "UDOO x86", "0000:00:02.0", "Port B" },
};
static struct device *secocec_cec_find_hdmi_dev(struct device *dev,
const char **conn)
{
int i;
for (i = 0 ; i < ARRAY_SIZE(secocec_dmi_match_table) ; ++i) {
const struct cec_dmi_match *m = &secocec_dmi_match_table[i];
if (dmi_match(DMI_SYS_VENDOR, m->sys_vendor) &&
dmi_match(DMI_PRODUCT_NAME, m->product_name)) {
struct device *d;
/* Find the device, bail out if not yet registered */
d = bus_find_device_by_name(&pci_bus_type, NULL,
m->devname);
if (!d)
return ERR_PTR(-EPROBE_DEFER);
put_device(d);
*conn = m->conn;
return d;
}
}
return ERR_PTR(-EINVAL);
}
static int secocec_acpi_probe(struct secocec_data *sdev)
{
struct device *dev = sdev->dev;
struct gpio_desc *gpio;
int irq = 0;
gpio = devm_gpiod_get(dev, NULL, GPIOD_IN);
if (IS_ERR(gpio)) {
dev_err(dev, "Cannot request interrupt gpio\n");
return PTR_ERR(gpio);
}
irq = gpiod_to_irq(gpio);
if (irq < 0) {
dev_err(dev, "Cannot find valid irq\n");
return -ENODEV;
}
dev_dbg(dev, "irq-gpio is bound to IRQ %d\n", irq);
sdev->irq = irq;
return 0;
}
static int secocec_probe(struct platform_device *pdev)
{
struct secocec_data *secocec;
struct device *dev = &pdev->dev;
struct device *hdmi_dev;
const char *conn = NULL;
int ret;
u16 val;
hdmi_dev = secocec_cec_find_hdmi_dev(&pdev->dev, &conn);
if (IS_ERR(hdmi_dev))
return PTR_ERR(hdmi_dev);
secocec = devm_kzalloc(dev, sizeof(*secocec), GFP_KERNEL);
if (!secocec)
return -ENOMEM;
dev_set_drvdata(dev, secocec);
/* Request SMBus regions */
if (!request_muxed_region(BRA_SMB_BASE_ADDR, 7, "CEC00001")) {
dev_err(dev, "Request memory region failed\n");
return -ENXIO;
}
secocec->pdev = pdev;
secocec->dev = dev;
if (!has_acpi_companion(dev)) {
dev_dbg(dev, "Cannot find any ACPI companion\n");
ret = -ENODEV;
goto err;
}
ret = secocec_acpi_probe(secocec);
if (ret) {
dev_err(dev, "Cannot assign gpio to IRQ\n");
ret = -ENODEV;
goto err;
}
/* Firmware version check */
ret = smb_rd16(SECOCEC_VERSION, &val);
if (ret) {
dev_err(dev, "Cannot check fw version\n");
goto err;
}
if (val < SECOCEC_LATEST_FW) {
dev_err(dev, "CEC Firmware not supported (v.%04x). Use ver > v.%04x\n",
val, SECOCEC_LATEST_FW);
ret = -EINVAL;
goto err;
}
ret = devm_request_threaded_irq(dev,
secocec->irq,
NULL,
secocec_irq_handler,
IRQF_TRIGGER_RISING | IRQF_ONESHOT,
dev_name(&pdev->dev), secocec);
if (ret) {
dev_err(dev, "Cannot request IRQ %d\n", secocec->irq);
ret = -EIO;
goto err;
}
/* Allocate CEC adapter */
secocec->cec_adap = cec_allocate_adapter(&secocec_cec_adap_ops,
secocec,
dev_name(dev),
CEC_CAP_DEFAULTS |
CEC_CAP_CONNECTOR_INFO,
SECOCEC_MAX_ADDRS);
if (IS_ERR(secocec->cec_adap)) {
ret = PTR_ERR(secocec->cec_adap);
goto err;
}
secocec->notifier = cec_notifier_cec_adap_register(hdmi_dev, conn,
secocec->cec_adap);
if (!secocec->notifier) {
ret = -ENOMEM;
goto err_delete_adapter;
}
ret = cec_register_adapter(secocec->cec_adap, dev);
if (ret)
goto err_notifier;
ret = secocec_ir_probe(secocec);
if (ret)
goto err_notifier;
platform_set_drvdata(pdev, secocec);
dev_dbg(dev, "Device registered\n");
return ret;
err_notifier:
cec_notifier_cec_adap_unregister(secocec->notifier, secocec->cec_adap);
err_delete_adapter:
cec_delete_adapter(secocec->cec_adap);
err:
release_region(BRA_SMB_BASE_ADDR, 7);
dev_err(dev, "%s device probe failed\n", dev_name(dev));
return ret;
}
static void secocec_remove(struct platform_device *pdev)
{
struct secocec_data *secocec = platform_get_drvdata(pdev);
u16 val;
if (secocec->ir) {
smb_rd16(SECOCEC_ENABLE_REG_1, &val);
smb_wr16(SECOCEC_ENABLE_REG_1, val & ~SECOCEC_ENABLE_REG_1_IR);
dev_dbg(&pdev->dev, "IR disabled\n");
}
cec_notifier_cec_adap_unregister(secocec->notifier, secocec->cec_adap);
cec_unregister_adapter(secocec->cec_adap);
release_region(BRA_SMB_BASE_ADDR, 7);
dev_dbg(&pdev->dev, "CEC device removed\n");
}
#ifdef CONFIG_PM_SLEEP
static int secocec_suspend(struct device *dev)
{
int status;
u16 val;
dev_dbg(dev, "Device going to suspend, disabling\n");
/* Clear the status register */
status = smb_rd16(SECOCEC_STATUS_REG_1, &val);
if (status)
goto err;
status = smb_wr16(SECOCEC_STATUS_REG_1, val);
if (status)
goto err;
/* Disable the interrupts */
status = smb_rd16(SECOCEC_ENABLE_REG_1, &val);
if (status)
goto err;
status = smb_wr16(SECOCEC_ENABLE_REG_1, val &
~SECOCEC_ENABLE_REG_1_CEC & ~SECOCEC_ENABLE_REG_1_IR);
if (status)
goto err;
return 0;
err:
dev_err(dev, "Suspend failed: %d\n", status);
return status;
}
static int secocec_resume(struct device *dev)
{
int status;
u16 val;
dev_dbg(dev, "Resuming device from suspend\n");
/* Clear the status register */
status = smb_rd16(SECOCEC_STATUS_REG_1, &val);
if (status)
goto err;
status = smb_wr16(SECOCEC_STATUS_REG_1, val);
if (status)
goto err;
/* Enable the interrupts */
status = smb_rd16(SECOCEC_ENABLE_REG_1, &val);
if (status)
goto err;
status = smb_wr16(SECOCEC_ENABLE_REG_1, val | SECOCEC_ENABLE_REG_1_CEC);
if (status)
goto err;
dev_dbg(dev, "Device resumed from suspend\n");
return 0;
err:
dev_err(dev, "Resume failed: %d\n", status);
return status;
}
static SIMPLE_DEV_PM_OPS(secocec_pm_ops, secocec_suspend, secocec_resume);
#define SECOCEC_PM_OPS (&secocec_pm_ops)
#else
#define SECOCEC_PM_OPS NULL
#endif
#ifdef CONFIG_ACPI
static const struct acpi_device_id secocec_acpi_match[] = {
{"CEC00001", 0},
{},
};
MODULE_DEVICE_TABLE(acpi, secocec_acpi_match);
#endif
static struct platform_driver secocec_driver = {
.driver = {
.name = SECOCEC_DEV_NAME,
.acpi_match_table = ACPI_PTR(secocec_acpi_match),
.pm = SECOCEC_PM_OPS,
},
.probe = secocec_probe,
.remove_new = secocec_remove,
};
module_platform_driver(secocec_driver);
MODULE_DESCRIPTION("SECO CEC X86 Driver");
MODULE_AUTHOR("Ettore Chimenti <[email protected]>");
MODULE_LICENSE("Dual BSD/GPL");
| linux-master | drivers/media/cec/platform/seco/seco-cec.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* CEC driver for ChromeOS Embedded Controller
*
* Copyright (c) 2018 BayLibre, SAS
* Author: Neil Armstrong <[email protected]>
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/dmi.h>
#include <linux/pci.h>
#include <linux/cec.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/platform_data/cros_ec_commands.h>
#include <linux/platform_data/cros_ec_proto.h>
#include <media/cec.h>
#include <media/cec-notifier.h>
#define DRV_NAME "cros-ec-cec"
/**
* struct cros_ec_cec - Driver data for EC CEC
*
* @cros_ec: Pointer to EC device
* @notifier: Notifier info for responding to EC events
* @adap: CEC adapter
* @notify: CEC notifier pointer
* @rx_msg: storage for a received message
*/
struct cros_ec_cec {
struct cros_ec_device *cros_ec;
struct notifier_block notifier;
struct cec_adapter *adap;
struct cec_notifier *notify;
struct cec_msg rx_msg;
};
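/* Forward a CEC message received via an EC MKBP event to the CEC framework */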
static void handle_cec_message(struct cros_ec_cec *cros_ec_cec)
{
struct cros_ec_device *cros_ec = cros_ec_cec->cros_ec;
uint8_t *cec_message = cros_ec->event_data.data.cec_message;
unsigned int len = cros_ec->event_size;
if (len > CEC_MAX_MSG_SIZE)
len = CEC_MAX_MSG_SIZE;
cros_ec_cec->rx_msg.len = len;
memcpy(cros_ec_cec->rx_msg.msg, cec_message, len);
cec_received_msg(cros_ec_cec->adap, &cros_ec_cec->rx_msg);
}
static void handle_cec_event(struct cros_ec_cec *cros_ec_cec)
{
struct cros_ec_device *cros_ec = cros_ec_cec->cros_ec;
uint32_t events = cros_ec->event_data.data.cec_events;
if (events & EC_MKBP_CEC_SEND_OK)
cec_transmit_attempt_done(cros_ec_cec->adap,
CEC_TX_STATUS_OK);
/* FW takes care of all retries, tell core to avoid more retries */
if (events & EC_MKBP_CEC_SEND_FAILED)
cec_transmit_attempt_done(cros_ec_cec->adap,
CEC_TX_STATUS_MAX_RETRIES |
CEC_TX_STATUS_NACK);
}
static int cros_ec_cec_event(struct notifier_block *nb,
unsigned long queued_during_suspend,
void *_notify)
{
struct cros_ec_cec *cros_ec_cec;
struct cros_ec_device *cros_ec;
cros_ec_cec = container_of(nb, struct cros_ec_cec, notifier);
cros_ec = cros_ec_cec->cros_ec;
if (cros_ec->event_data.event_type == EC_MKBP_EVENT_CEC_EVENT) {
handle_cec_event(cros_ec_cec);
return NOTIFY_OK;
}
if (cros_ec->event_data.event_type == EC_MKBP_EVENT_CEC_MESSAGE) {
handle_cec_message(cros_ec_cec);
return NOTIFY_OK;
}
return NOTIFY_DONE;
}
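/* Program the logical address into the EC using the EC_CMD_CEC_SET command */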
static int cros_ec_cec_set_log_addr(struct cec_adapter *adap, u8 logical_addr)
{
struct cros_ec_cec *cros_ec_cec = adap->priv;
struct cros_ec_device *cros_ec = cros_ec_cec->cros_ec;
struct {
struct cros_ec_command msg;
struct ec_params_cec_set data;
} __packed msg = {};
int ret;
msg.msg.command = EC_CMD_CEC_SET;
msg.msg.outsize = sizeof(msg.data);
msg.data.cmd = CEC_CMD_LOGICAL_ADDRESS;
msg.data.val = logical_addr;
ret = cros_ec_cmd_xfer_status(cros_ec, &msg.msg);
if (ret < 0) {
dev_err(cros_ec->dev,
"error setting CEC logical address on EC: %d\n", ret);
return ret;
}
return 0;
}
static int cros_ec_cec_transmit(struct cec_adapter *adap, u8 attempts,
u32 signal_free_time, struct cec_msg *cec_msg)
{
struct cros_ec_cec *cros_ec_cec = adap->priv;
struct cros_ec_device *cros_ec = cros_ec_cec->cros_ec;
struct {
struct cros_ec_command msg;
struct ec_params_cec_write data;
} __packed msg = {};
int ret;
msg.msg.command = EC_CMD_CEC_WRITE_MSG;
msg.msg.outsize = cec_msg->len;
memcpy(msg.data.msg, cec_msg->msg, cec_msg->len);
ret = cros_ec_cmd_xfer_status(cros_ec, &msg.msg);
if (ret < 0) {
dev_err(cros_ec->dev,
"error writing CEC msg on EC: %d\n", ret);
return ret;
}
return 0;
}
static int cros_ec_cec_adap_enable(struct cec_adapter *adap, bool enable)
{
struct cros_ec_cec *cros_ec_cec = adap->priv;
struct cros_ec_device *cros_ec = cros_ec_cec->cros_ec;
struct {
struct cros_ec_command msg;
struct ec_params_cec_set data;
} __packed msg = {};
int ret;
msg.msg.command = EC_CMD_CEC_SET;
msg.msg.outsize = sizeof(msg.data);
msg.data.cmd = CEC_CMD_ENABLE;
msg.data.val = enable;
ret = cros_ec_cmd_xfer_status(cros_ec, &msg.msg);
if (ret < 0) {
dev_err(cros_ec->dev,
"error %sabling CEC on EC: %d\n",
(enable ? "en" : "dis"), ret);
return ret;
}
return 0;
}
static const struct cec_adap_ops cros_ec_cec_ops = {
.adap_enable = cros_ec_cec_adap_enable,
.adap_log_addr = cros_ec_cec_set_log_addr,
.adap_transmit = cros_ec_cec_transmit,
};
#ifdef CONFIG_PM_SLEEP
static int cros_ec_cec_suspend(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
struct cros_ec_cec *cros_ec_cec = dev_get_drvdata(&pdev->dev);
if (device_may_wakeup(dev))
enable_irq_wake(cros_ec_cec->cros_ec->irq);
return 0;
}
static int cros_ec_cec_resume(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
struct cros_ec_cec *cros_ec_cec = dev_get_drvdata(&pdev->dev);
if (device_may_wakeup(dev))
disable_irq_wake(cros_ec_cec->cros_ec->irq);
return 0;
}
#endif
static SIMPLE_DEV_PM_OPS(cros_ec_cec_pm_ops,
cros_ec_cec_suspend, cros_ec_cec_resume);
#if IS_ENABLED(CONFIG_PCI) && IS_ENABLED(CONFIG_DMI)
/*
* The Firmware only handles a single CEC interface tied to a single HDMI
* connector we specify along with the DRM device name handling the HDMI output
*/
struct cec_dmi_match {
const char *sys_vendor;
const char *product_name;
const char *devname;
const char *conn;
};
static const struct cec_dmi_match cec_dmi_match_table[] = {
/* Google Fizz */
{ "Google", "Fizz", "0000:00:02.0", "Port B" },
/* Google Brask */
{ "Google", "Brask", "0000:00:02.0", "Port B" },
/* Google Moli */
{ "Google", "Moli", "0000:00:02.0", "Port B" },
/* Google Kinox */
{ "Google", "Kinox", "0000:00:02.0", "Port B" },
/* Google Kuldax */
{ "Google", "Kuldax", "0000:00:02.0", "Port B" },
/* Google Aurash */
{ "Google", "Aurash", "0000:00:02.0", "Port B" },
/* Google Gladios */
{ "Google", "Gladios", "0000:00:02.0", "Port B" },
/* Google Lisbon */
{ "Google", "Lisbon", "0000:00:02.0", "Port B" },
};
static struct device *cros_ec_cec_find_hdmi_dev(struct device *dev,
const char **conn)
{
int i;
for (i = 0 ; i < ARRAY_SIZE(cec_dmi_match_table) ; ++i) {
const struct cec_dmi_match *m = &cec_dmi_match_table[i];
if (dmi_match(DMI_SYS_VENDOR, m->sys_vendor) &&
dmi_match(DMI_PRODUCT_NAME, m->product_name)) {
struct device *d;
/* Find the device, bail out if not yet registered */
d = bus_find_device_by_name(&pci_bus_type, NULL,
m->devname);
if (!d)
return ERR_PTR(-EPROBE_DEFER);
put_device(d);
*conn = m->conn;
return d;
}
}
/* Hardware support must be added in the cec_dmi_match_table */
dev_warn(dev, "CEC notifier not configured for this hardware\n");
return ERR_PTR(-ENODEV);
}
#else
static struct device *cros_ec_cec_find_hdmi_dev(struct device *dev,
const char **conn)
{
return ERR_PTR(-ENODEV);
}
#endif
static int cros_ec_cec_probe(struct platform_device *pdev)
{
struct cros_ec_dev *ec_dev = dev_get_drvdata(pdev->dev.parent);
struct cros_ec_device *cros_ec = ec_dev->ec_dev;
struct cros_ec_cec *cros_ec_cec;
struct device *hdmi_dev;
const char *conn = NULL;
int ret;
hdmi_dev = cros_ec_cec_find_hdmi_dev(&pdev->dev, &conn);
if (IS_ERR(hdmi_dev))
return PTR_ERR(hdmi_dev);
cros_ec_cec = devm_kzalloc(&pdev->dev, sizeof(*cros_ec_cec),
GFP_KERNEL);
if (!cros_ec_cec)
return -ENOMEM;
platform_set_drvdata(pdev, cros_ec_cec);
cros_ec_cec->cros_ec = cros_ec;
device_init_wakeup(&pdev->dev, 1);
cros_ec_cec->adap = cec_allocate_adapter(&cros_ec_cec_ops, cros_ec_cec,
DRV_NAME,
CEC_CAP_DEFAULTS |
CEC_CAP_CONNECTOR_INFO, 1);
if (IS_ERR(cros_ec_cec->adap))
return PTR_ERR(cros_ec_cec->adap);
cros_ec_cec->notify = cec_notifier_cec_adap_register(hdmi_dev, conn,
cros_ec_cec->adap);
if (!cros_ec_cec->notify) {
ret = -ENOMEM;
goto out_probe_adapter;
}
/* Get CEC events from the EC. */
cros_ec_cec->notifier.notifier_call = cros_ec_cec_event;
ret = blocking_notifier_chain_register(&cros_ec->event_notifier,
&cros_ec_cec->notifier);
if (ret) {
dev_err(&pdev->dev, "failed to register notifier\n");
goto out_probe_notify;
}
ret = cec_register_adapter(cros_ec_cec->adap, &pdev->dev);
if (ret < 0)
goto out_probe_notify;
return 0;
out_probe_notify:
cec_notifier_cec_adap_unregister(cros_ec_cec->notify,
cros_ec_cec->adap);
out_probe_adapter:
cec_delete_adapter(cros_ec_cec->adap);
return ret;
}
static void cros_ec_cec_remove(struct platform_device *pdev)
{
struct cros_ec_cec *cros_ec_cec = platform_get_drvdata(pdev);
struct device *dev = &pdev->dev;
int ret;
/*
* blocking_notifier_chain_unregister() only fails if the notifier isn't
* in the list. We know it was added to it by .probe(), so there should
* be no need for error checking. Be cautious and still check.
*/
ret = blocking_notifier_chain_unregister(
&cros_ec_cec->cros_ec->event_notifier,
&cros_ec_cec->notifier);
if (ret)
dev_err(dev, "failed to unregister notifier\n");
cec_notifier_cec_adap_unregister(cros_ec_cec->notify,
cros_ec_cec->adap);
cec_unregister_adapter(cros_ec_cec->adap);
}
static struct platform_driver cros_ec_cec_driver = {
.probe = cros_ec_cec_probe,
.remove_new = cros_ec_cec_remove,
.driver = {
.name = DRV_NAME,
.pm = &cros_ec_cec_pm_ops,
},
};
module_platform_driver(cros_ec_cec_driver);
MODULE_DESCRIPTION("CEC driver for ChromeOS ECs");
MODULE_AUTHOR("Neil Armstrong <[email protected]>");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:" DRV_NAME);
| linux-master | drivers/media/cec/platform/cros-ec/cros-ec-cec.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Tegra CEC implementation
*
* The original 3.10 CEC driver using a custom API:
*
* Copyright (c) 2012-2015, NVIDIA CORPORATION. All rights reserved.
*
* Conversion to the CEC framework and to the mainline kernel:
*
* Copyright 2016-2017 Cisco Systems, Inc. and/or its affiliates. All rights reserved.
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/io.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/pm.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/clk/tegra.h>
#include <media/cec-notifier.h>
#include "tegra_cec.h"
#define TEGRA_CEC_NAME "tegra-cec"
struct tegra_cec {
struct cec_adapter *adap;
struct device *dev;
struct clk *clk;
void __iomem *cec_base;
struct cec_notifier *notifier;
int tegra_cec_irq;
bool rx_done;
bool tx_done;
int tx_status;
u8 rx_buf[CEC_MAX_MSG_SIZE];
u8 rx_buf_cnt;
u32 tx_buf[CEC_MAX_MSG_SIZE];
u8 tx_buf_cur;
u8 tx_buf_cnt;
};
static inline u32 cec_read(struct tegra_cec *cec, u32 reg)
{
return readl(cec->cec_base + reg);
}
static inline void cec_write(struct tegra_cec *cec, u32 reg, u32 val)
{
writel(val, cec->cec_base + reg);
}
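/* Clear all pending interrupts while the controller is briefly disabled */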
static void tegra_cec_error_recovery(struct tegra_cec *cec)
{
u32 hw_ctrl;
hw_ctrl = cec_read(cec, TEGRA_CEC_HW_CONTROL);
cec_write(cec, TEGRA_CEC_HW_CONTROL, 0);
cec_write(cec, TEGRA_CEC_INT_STAT, 0xffffffff);
cec_write(cec, TEGRA_CEC_HW_CONTROL, hw_ctrl);
}
static irqreturn_t tegra_cec_irq_thread_handler(int irq, void *data)
{
struct device *dev = data;
struct tegra_cec *cec = dev_get_drvdata(dev);
if (cec->tx_done) {
cec_transmit_attempt_done(cec->adap, cec->tx_status);
cec->tx_done = false;
}
if (cec->rx_done) {
struct cec_msg msg = {};
msg.len = cec->rx_buf_cnt;
memcpy(msg.msg, cec->rx_buf, msg.len);
cec_received_msg(cec->adap, &msg);
cec->rx_done = false;
cec->rx_buf_cnt = 0;
}
return IRQ_HANDLED;
}
static irqreturn_t tegra_cec_irq_handler(int irq, void *data)
{
struct device *dev = data;
struct tegra_cec *cec = dev_get_drvdata(dev);
u32 status, mask;
status = cec_read(cec, TEGRA_CEC_INT_STAT);
mask = cec_read(cec, TEGRA_CEC_INT_MASK);
status &= mask;
if (!status)
return IRQ_HANDLED;
if (status & TEGRA_CEC_INT_STAT_TX_REGISTER_UNDERRUN) {
dev_err(dev, "TX underrun, interrupt timing issue!\n");
tegra_cec_error_recovery(cec);
cec_write(cec, TEGRA_CEC_INT_MASK,
mask & ~TEGRA_CEC_INT_MASK_TX_REGISTER_EMPTY);
cec->tx_done = true;
cec->tx_status = CEC_TX_STATUS_ERROR;
return IRQ_WAKE_THREAD;
}
if ((status & TEGRA_CEC_INT_STAT_TX_ARBITRATION_FAILED) ||
(status & TEGRA_CEC_INT_STAT_TX_BUS_ANOMALY_DETECTED)) {
tegra_cec_error_recovery(cec);
cec_write(cec, TEGRA_CEC_INT_MASK,
mask & ~TEGRA_CEC_INT_MASK_TX_REGISTER_EMPTY);
cec->tx_done = true;
if (status & TEGRA_CEC_INT_STAT_TX_BUS_ANOMALY_DETECTED)
cec->tx_status = CEC_TX_STATUS_LOW_DRIVE;
else
cec->tx_status = CEC_TX_STATUS_ARB_LOST;
return IRQ_WAKE_THREAD;
}
if (status & TEGRA_CEC_INT_STAT_TX_FRAME_TRANSMITTED) {
cec_write(cec, TEGRA_CEC_INT_STAT,
TEGRA_CEC_INT_STAT_TX_FRAME_TRANSMITTED);
if (status & TEGRA_CEC_INT_STAT_TX_FRAME_OR_BLOCK_NAKD) {
tegra_cec_error_recovery(cec);
cec->tx_done = true;
cec->tx_status = CEC_TX_STATUS_NACK;
} else {
cec->tx_done = true;
cec->tx_status = CEC_TX_STATUS_OK;
}
return IRQ_WAKE_THREAD;
}
if (status & TEGRA_CEC_INT_STAT_TX_FRAME_OR_BLOCK_NAKD)
dev_warn(dev, "TX NAKed on the fly!\n");
if (status & TEGRA_CEC_INT_STAT_TX_REGISTER_EMPTY) {
if (cec->tx_buf_cur == cec->tx_buf_cnt) {
cec_write(cec, TEGRA_CEC_INT_MASK,
mask & ~TEGRA_CEC_INT_MASK_TX_REGISTER_EMPTY);
} else {
cec_write(cec, TEGRA_CEC_TX_REGISTER,
cec->tx_buf[cec->tx_buf_cur++]);
cec_write(cec, TEGRA_CEC_INT_STAT,
TEGRA_CEC_INT_STAT_TX_REGISTER_EMPTY);
}
}
if (status & TEGRA_CEC_INT_STAT_RX_START_BIT_DETECTED) {
cec_write(cec, TEGRA_CEC_INT_STAT,
TEGRA_CEC_INT_STAT_RX_START_BIT_DETECTED);
cec->rx_done = false;
cec->rx_buf_cnt = 0;
}
if (status & TEGRA_CEC_INT_STAT_RX_REGISTER_FULL) {
u32 v;
cec_write(cec, TEGRA_CEC_INT_STAT,
TEGRA_CEC_INT_STAT_RX_REGISTER_FULL);
v = cec_read(cec, TEGRA_CEC_RX_REGISTER);
if (cec->rx_buf_cnt < CEC_MAX_MSG_SIZE)
cec->rx_buf[cec->rx_buf_cnt++] = v & 0xff;
if (v & TEGRA_CEC_RX_REGISTER_EOM) {
cec->rx_done = true;
return IRQ_WAKE_THREAD;
}
}
return IRQ_HANDLED;
}
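/*
 * Reset the controller and, when enabling, program the input filter, the
 * RX/TX bit-timing registers and the interrupt mask, then switch the
 * controller into combined TX/RX mode.
 */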
static int tegra_cec_adap_enable(struct cec_adapter *adap, bool enable)
{
struct tegra_cec *cec = adap->priv;
cec->rx_buf_cnt = 0;
cec->tx_buf_cnt = 0;
cec->tx_buf_cur = 0;
cec_write(cec, TEGRA_CEC_HW_CONTROL, 0);
cec_write(cec, TEGRA_CEC_INT_MASK, 0);
cec_write(cec, TEGRA_CEC_INT_STAT, 0xffffffff);
cec_write(cec, TEGRA_CEC_SW_CONTROL, 0);
if (!enable)
return 0;
cec_write(cec, TEGRA_CEC_INPUT_FILTER, (1U << 31) | 0x20);
cec_write(cec, TEGRA_CEC_RX_TIMING_0,
(0x7a << TEGRA_CEC_RX_TIM0_START_BIT_MAX_LO_TIME_SHIFT) |
(0x6d << TEGRA_CEC_RX_TIM0_START_BIT_MIN_LO_TIME_SHIFT) |
(0x93 << TEGRA_CEC_RX_TIM0_START_BIT_MAX_DURATION_SHIFT) |
(0x86 << TEGRA_CEC_RX_TIM0_START_BIT_MIN_DURATION_SHIFT));
cec_write(cec, TEGRA_CEC_RX_TIMING_1,
(0x35 << TEGRA_CEC_RX_TIM1_DATA_BIT_MAX_LO_TIME_SHIFT) |
(0x21 << TEGRA_CEC_RX_TIM1_DATA_BIT_SAMPLE_TIME_SHIFT) |
(0x56 << TEGRA_CEC_RX_TIM1_DATA_BIT_MAX_DURATION_SHIFT) |
(0x40 << TEGRA_CEC_RX_TIM1_DATA_BIT_MIN_DURATION_SHIFT));
cec_write(cec, TEGRA_CEC_RX_TIMING_2,
(0x50 << TEGRA_CEC_RX_TIM2_END_OF_BLOCK_TIME_SHIFT));
cec_write(cec, TEGRA_CEC_TX_TIMING_0,
(0x74 << TEGRA_CEC_TX_TIM0_START_BIT_LO_TIME_SHIFT) |
(0x8d << TEGRA_CEC_TX_TIM0_START_BIT_DURATION_SHIFT) |
(0x08 << TEGRA_CEC_TX_TIM0_BUS_XITION_TIME_SHIFT) |
(0x71 << TEGRA_CEC_TX_TIM0_BUS_ERROR_LO_TIME_SHIFT));
cec_write(cec, TEGRA_CEC_TX_TIMING_1,
(0x2f << TEGRA_CEC_TX_TIM1_LO_DATA_BIT_LO_TIME_SHIFT) |
(0x13 << TEGRA_CEC_TX_TIM1_HI_DATA_BIT_LO_TIME_SHIFT) |
(0x4b << TEGRA_CEC_TX_TIM1_DATA_BIT_DURATION_SHIFT) |
(0x21 << TEGRA_CEC_TX_TIM1_ACK_NAK_BIT_SAMPLE_TIME_SHIFT));
cec_write(cec, TEGRA_CEC_TX_TIMING_2,
(0x07 << TEGRA_CEC_TX_TIM2_BUS_IDLE_TIME_ADDITIONAL_FRAME_SHIFT) |
(0x05 << TEGRA_CEC_TX_TIM2_BUS_IDLE_TIME_NEW_FRAME_SHIFT) |
(0x03 << TEGRA_CEC_TX_TIM2_BUS_IDLE_TIME_RETRY_FRAME_SHIFT));
cec_write(cec, TEGRA_CEC_INT_MASK,
TEGRA_CEC_INT_MASK_TX_REGISTER_UNDERRUN |
TEGRA_CEC_INT_MASK_TX_FRAME_OR_BLOCK_NAKD |
TEGRA_CEC_INT_MASK_TX_ARBITRATION_FAILED |
TEGRA_CEC_INT_MASK_TX_BUS_ANOMALY_DETECTED |
TEGRA_CEC_INT_MASK_TX_FRAME_TRANSMITTED |
TEGRA_CEC_INT_MASK_RX_REGISTER_FULL |
TEGRA_CEC_INT_MASK_RX_START_BIT_DETECTED);
cec_write(cec, TEGRA_CEC_HW_CONTROL, TEGRA_CEC_HWCTRL_TX_RX_MODE);
return 0;
}
static int tegra_cec_adap_log_addr(struct cec_adapter *adap, u8 logical_addr)
{
struct tegra_cec *cec = adap->priv;
u32 state = cec_read(cec, TEGRA_CEC_HW_CONTROL);
if (logical_addr == CEC_LOG_ADDR_INVALID)
state &= ~TEGRA_CEC_HWCTRL_RX_LADDR_MASK;
else
state |= TEGRA_CEC_HWCTRL_RX_LADDR((1 << logical_addr));
cec_write(cec, TEGRA_CEC_HW_CONTROL, state);
return 0;
}
static int tegra_cec_adap_monitor_all_enable(struct cec_adapter *adap,
bool enable)
{
struct tegra_cec *cec = adap->priv;
u32 reg = cec_read(cec, TEGRA_CEC_HW_CONTROL);
if (enable)
reg |= TEGRA_CEC_HWCTRL_RX_SNOOP;
else
reg &= ~TEGRA_CEC_HWCTRL_RX_SNOOP;
cec_write(cec, TEGRA_CEC_HW_CONTROL, reg);
return 0;
}
static int tegra_cec_adap_transmit(struct cec_adapter *adap, u8 attempts,
u32 signal_free_time_ms, struct cec_msg *msg)
{
bool retry_xfer = signal_free_time_ms == CEC_SIGNAL_FREE_TIME_RETRY;
struct tegra_cec *cec = adap->priv;
unsigned int i;
u32 mode = 0;
u32 mask;
if (cec_msg_is_broadcast(msg))
mode = TEGRA_CEC_TX_REG_BCAST;
cec->tx_buf_cur = 0;
cec->tx_buf_cnt = msg->len;
for (i = 0; i < msg->len; i++) {
cec->tx_buf[i] = mode | msg->msg[i];
if (i == 0)
cec->tx_buf[i] |= TEGRA_CEC_TX_REG_START_BIT;
if (i == msg->len - 1)
cec->tx_buf[i] |= TEGRA_CEC_TX_REG_EOM;
if (i == 0 && retry_xfer)
cec->tx_buf[i] |= TEGRA_CEC_TX_REG_RETRY;
}
mask = cec_read(cec, TEGRA_CEC_INT_MASK);
cec_write(cec, TEGRA_CEC_INT_MASK,
mask | TEGRA_CEC_INT_MASK_TX_REGISTER_EMPTY);
return 0;
}
static const struct cec_adap_ops tegra_cec_ops = {
.adap_enable = tegra_cec_adap_enable,
.adap_log_addr = tegra_cec_adap_log_addr,
.adap_transmit = tegra_cec_adap_transmit,
.adap_monitor_all_enable = tegra_cec_adap_monitor_all_enable,
};
static int tegra_cec_probe(struct platform_device *pdev)
{
struct device *hdmi_dev;
struct tegra_cec *cec;
struct resource *res;
int ret = 0;
hdmi_dev = cec_notifier_parse_hdmi_phandle(&pdev->dev);
if (IS_ERR(hdmi_dev))
return PTR_ERR(hdmi_dev);
cec = devm_kzalloc(&pdev->dev, sizeof(struct tegra_cec), GFP_KERNEL);
if (!cec)
return -ENOMEM;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res) {
dev_err(&pdev->dev,
"Unable to allocate resources for device\n");
return -EBUSY;
}
if (!devm_request_mem_region(&pdev->dev, res->start, resource_size(res),
pdev->name)) {
dev_err(&pdev->dev,
"Unable to request mem region for device\n");
return -EBUSY;
}
cec->tegra_cec_irq = platform_get_irq(pdev, 0);
if (cec->tegra_cec_irq < 0)
return cec->tegra_cec_irq;
cec->cec_base = devm_ioremap(&pdev->dev, res->start,
resource_size(res));
if (!cec->cec_base) {
dev_err(&pdev->dev, "Unable to grab IOs for device\n");
return -EBUSY;
}
cec->clk = devm_clk_get(&pdev->dev, "cec");
if (IS_ERR_OR_NULL(cec->clk)) {
dev_err(&pdev->dev, "Can't get clock for CEC\n");
return -ENOENT;
}
ret = clk_prepare_enable(cec->clk);
if (ret) {
dev_err(&pdev->dev, "Unable to prepare clock for CEC\n");
return ret;
}
/* set context info. */
cec->dev = &pdev->dev;
platform_set_drvdata(pdev, cec);
ret = devm_request_threaded_irq(&pdev->dev, cec->tegra_cec_irq,
tegra_cec_irq_handler, tegra_cec_irq_thread_handler,
0, "cec_irq", &pdev->dev);
if (ret) {
dev_err(&pdev->dev,
"Unable to request interrupt for device\n");
goto err_clk;
}
cec->adap = cec_allocate_adapter(&tegra_cec_ops, cec, TEGRA_CEC_NAME,
CEC_CAP_DEFAULTS | CEC_CAP_MONITOR_ALL |
CEC_CAP_CONNECTOR_INFO,
CEC_MAX_LOG_ADDRS);
if (IS_ERR(cec->adap)) {
ret = -ENOMEM;
dev_err(&pdev->dev, "Couldn't create cec adapter\n");
goto err_clk;
}
cec->notifier = cec_notifier_cec_adap_register(hdmi_dev, NULL,
cec->adap);
if (!cec->notifier) {
ret = -ENOMEM;
goto err_adapter;
}
ret = cec_register_adapter(cec->adap, &pdev->dev);
if (ret) {
dev_err(&pdev->dev, "Couldn't register device\n");
goto err_notifier;
}
return 0;
err_notifier:
cec_notifier_cec_adap_unregister(cec->notifier, cec->adap);
err_adapter:
cec_delete_adapter(cec->adap);
err_clk:
clk_disable_unprepare(cec->clk);
return ret;
}
static void tegra_cec_remove(struct platform_device *pdev)
{
struct tegra_cec *cec = platform_get_drvdata(pdev);
clk_disable_unprepare(cec->clk);
cec_notifier_cec_adap_unregister(cec->notifier, cec->adap);
cec_unregister_adapter(cec->adap);
}
#ifdef CONFIG_PM
static int tegra_cec_suspend(struct platform_device *pdev, pm_message_t state)
{
struct tegra_cec *cec = platform_get_drvdata(pdev);
clk_disable_unprepare(cec->clk);
dev_notice(&pdev->dev, "suspended\n");
return 0;
}
static int tegra_cec_resume(struct platform_device *pdev)
{
struct tegra_cec *cec = platform_get_drvdata(pdev);
dev_notice(&pdev->dev, "Resuming\n");
return clk_prepare_enable(cec->clk);
}
#endif
static const struct of_device_id tegra_cec_of_match[] = {
{ .compatible = "nvidia,tegra114-cec", },
{ .compatible = "nvidia,tegra124-cec", },
{ .compatible = "nvidia,tegra210-cec", },
{},
};
static struct platform_driver tegra_cec_driver = {
.driver = {
.name = TEGRA_CEC_NAME,
.of_match_table = tegra_cec_of_match,
},
.probe = tegra_cec_probe,
.remove_new = tegra_cec_remove,
#ifdef CONFIG_PM
.suspend = tegra_cec_suspend,
.resume = tegra_cec_resume,
#endif
};
module_platform_driver(tegra_cec_driver);
MODULE_DESCRIPTION("Tegra HDMI CEC driver");
MODULE_AUTHOR("NVIDIA CORPORATION");
MODULE_AUTHOR("Cisco Systems, Inc. and/or its affiliates");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/media/cec/platform/tegra/tegra_cec.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/* drivers/media/platform/s5p-cec/s5p_cec.c
*
* Samsung S5P CEC driver
*
* Copyright (c) 2014 Samsung Electronics Co., Ltd.
*
* This driver is based on the "cec interface driver for exynos soc" by
* SangPil Moon.
*/
#include <linux/clk.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/timer.h>
#include <linux/workqueue.h>
#include <media/cec.h>
#include <media/cec-notifier.h>
#include "exynos_hdmi_cec.h"
#include "regs-cec.h"
#include "s5p_cec.h"
#define CEC_NAME "s5p-cec"
static int debug;
module_param(debug, int, 0644);
MODULE_PARM_DESC(debug, "debug level (0-2)");
static int s5p_cec_adap_enable(struct cec_adapter *adap, bool enable)
{
int ret;
struct s5p_cec_dev *cec = cec_get_drvdata(adap);
if (enable) {
ret = pm_runtime_resume_and_get(cec->dev);
if (ret < 0)
return ret;
s5p_cec_reset(cec);
s5p_cec_set_divider(cec);
s5p_cec_threshold(cec);
s5p_cec_unmask_tx_interrupts(cec);
s5p_cec_unmask_rx_interrupts(cec);
s5p_cec_enable_rx(cec);
} else {
s5p_cec_mask_tx_interrupts(cec);
s5p_cec_mask_rx_interrupts(cec);
pm_runtime_put(cec->dev);
}
return 0;
}
static int s5p_cec_adap_log_addr(struct cec_adapter *adap, u8 addr)
{
struct s5p_cec_dev *cec = cec_get_drvdata(adap);
s5p_cec_set_addr(cec, addr);
return 0;
}
static int s5p_cec_adap_transmit(struct cec_adapter *adap, u8 attempts,
u32 signal_free_time, struct cec_msg *msg)
{
struct s5p_cec_dev *cec = cec_get_drvdata(adap);
/*
* Unclear if 0 retries are allowed by the hardware, so have 1 as
* the minimum.
*/
s5p_cec_copy_packet(cec, msg->msg, msg->len, max(1, attempts - 1));
return 0;
}
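/*
 * Hard interrupt handler: latch the TX/RX completion status into the driver
 * state and copy any received message, then wake the threaded handler which
 * reports the result to the CEC framework.
 */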
static irqreturn_t s5p_cec_irq_handler(int irq, void *priv)
{
struct s5p_cec_dev *cec = priv;
u32 status = 0;
status = s5p_cec_get_status(cec);
dev_dbg(cec->dev, "irq received\n");
if (status & CEC_STATUS_TX_DONE) {
if (status & CEC_STATUS_TX_NACK) {
dev_dbg(cec->dev, "CEC_STATUS_TX_NACK set\n");
cec->tx = STATE_NACK;
} else if (status & CEC_STATUS_TX_ERROR) {
dev_dbg(cec->dev, "CEC_STATUS_TX_ERROR set\n");
cec->tx = STATE_ERROR;
} else {
dev_dbg(cec->dev, "CEC_STATUS_TX_DONE\n");
cec->tx = STATE_DONE;
}
s5p_clr_pending_tx(cec);
}
if (status & CEC_STATUS_RX_DONE) {
if (status & CEC_STATUS_RX_ERROR) {
dev_dbg(cec->dev, "CEC_STATUS_RX_ERROR set\n");
s5p_cec_rx_reset(cec);
s5p_cec_enable_rx(cec);
} else {
dev_dbg(cec->dev, "CEC_STATUS_RX_DONE set\n");
if (cec->rx != STATE_IDLE)
dev_dbg(cec->dev, "Buffer overrun (worker did not process previous message)\n");
cec->rx = STATE_BUSY;
cec->msg.len = status >> 24;
if (cec->msg.len > CEC_MAX_MSG_SIZE)
cec->msg.len = CEC_MAX_MSG_SIZE;
cec->msg.rx_status = CEC_RX_STATUS_OK;
s5p_cec_get_rx_buf(cec, cec->msg.len,
cec->msg.msg);
cec->rx = STATE_DONE;
s5p_cec_enable_rx(cec);
}
/* Clear interrupt pending bit */
s5p_clr_pending_rx(cec);
}
return IRQ_WAKE_THREAD;
}
static irqreturn_t s5p_cec_irq_handler_thread(int irq, void *priv)
{
struct s5p_cec_dev *cec = priv;
dev_dbg(cec->dev, "irq processing thread\n");
switch (cec->tx) {
case STATE_DONE:
cec_transmit_done(cec->adap, CEC_TX_STATUS_OK, 0, 0, 0, 0);
cec->tx = STATE_IDLE;
break;
case STATE_NACK:
cec_transmit_done(cec->adap,
CEC_TX_STATUS_MAX_RETRIES | CEC_TX_STATUS_NACK,
0, 1, 0, 0);
cec->tx = STATE_IDLE;
break;
case STATE_ERROR:
cec_transmit_done(cec->adap,
CEC_TX_STATUS_MAX_RETRIES | CEC_TX_STATUS_ERROR,
0, 0, 0, 1);
cec->tx = STATE_IDLE;
break;
case STATE_BUSY:
dev_err(cec->dev, "state set to busy, this should not occur here\n");
break;
default:
break;
}
switch (cec->rx) {
case STATE_DONE:
cec_received_msg(cec->adap, &cec->msg);
cec->rx = STATE_IDLE;
break;
default:
break;
}
return IRQ_HANDLED;
}
static const struct cec_adap_ops s5p_cec_adap_ops = {
.adap_enable = s5p_cec_adap_enable,
.adap_log_addr = s5p_cec_adap_log_addr,
.adap_transmit = s5p_cec_adap_transmit,
};
static int s5p_cec_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct device *hdmi_dev;
struct s5p_cec_dev *cec;
bool needs_hpd = of_property_read_bool(pdev->dev.of_node, "needs-hpd");
int ret;
hdmi_dev = cec_notifier_parse_hdmi_phandle(dev);
if (IS_ERR(hdmi_dev))
return PTR_ERR(hdmi_dev);
cec = devm_kzalloc(&pdev->dev, sizeof(*cec), GFP_KERNEL);
if (!cec)
return -ENOMEM;
cec->dev = dev;
cec->irq = platform_get_irq(pdev, 0);
if (cec->irq < 0)
return cec->irq;
ret = devm_request_threaded_irq(dev, cec->irq, s5p_cec_irq_handler,
s5p_cec_irq_handler_thread, 0, pdev->name, cec);
if (ret)
return ret;
cec->clk = devm_clk_get(dev, "hdmicec");
if (IS_ERR(cec->clk))
return PTR_ERR(cec->clk);
cec->pmu = syscon_regmap_lookup_by_phandle(dev->of_node,
"samsung,syscon-phandle");
if (IS_ERR(cec->pmu))
return -EPROBE_DEFER;
cec->reg = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(cec->reg))
return PTR_ERR(cec->reg);
cec->adap = cec_allocate_adapter(&s5p_cec_adap_ops, cec, CEC_NAME,
CEC_CAP_DEFAULTS | (needs_hpd ? CEC_CAP_NEEDS_HPD : 0) |
CEC_CAP_CONNECTOR_INFO, 1);
ret = PTR_ERR_OR_ZERO(cec->adap);
if (ret)
return ret;
cec->notifier = cec_notifier_cec_adap_register(hdmi_dev, NULL,
cec->adap);
if (!cec->notifier) {
ret = -ENOMEM;
goto err_delete_adapter;
}
ret = cec_register_adapter(cec->adap, &pdev->dev);
if (ret)
goto err_notifier;
platform_set_drvdata(pdev, cec);
pm_runtime_enable(dev);
dev_dbg(dev, "successfully probed\n");
return 0;
err_notifier:
cec_notifier_cec_adap_unregister(cec->notifier, cec->adap);
err_delete_adapter:
cec_delete_adapter(cec->adap);
return ret;
}
static void s5p_cec_remove(struct platform_device *pdev)
{
struct s5p_cec_dev *cec = platform_get_drvdata(pdev);
cec_notifier_cec_adap_unregister(cec->notifier, cec->adap);
cec_unregister_adapter(cec->adap);
pm_runtime_disable(&pdev->dev);
}
static int __maybe_unused s5p_cec_runtime_suspend(struct device *dev)
{
struct s5p_cec_dev *cec = dev_get_drvdata(dev);
clk_disable_unprepare(cec->clk);
return 0;
}
static int __maybe_unused s5p_cec_runtime_resume(struct device *dev)
{
struct s5p_cec_dev *cec = dev_get_drvdata(dev);
int ret;
ret = clk_prepare_enable(cec->clk);
if (ret < 0)
return ret;
return 0;
}
static const struct dev_pm_ops s5p_cec_pm_ops = {
SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
pm_runtime_force_resume)
SET_RUNTIME_PM_OPS(s5p_cec_runtime_suspend, s5p_cec_runtime_resume,
NULL)
};
static const struct of_device_id s5p_cec_match[] = {
{
.compatible = "samsung,s5p-cec",
},
{},
};
MODULE_DEVICE_TABLE(of, s5p_cec_match);
static struct platform_driver s5p_cec_pdrv = {
.probe = s5p_cec_probe,
.remove_new = s5p_cec_remove,
.driver = {
.name = CEC_NAME,
.of_match_table = s5p_cec_match,
.pm = &s5p_cec_pm_ops,
},
};
module_platform_driver(s5p_cec_pdrv);
MODULE_AUTHOR("Kamil Debski <[email protected]>");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Samsung S5P CEC driver");
| linux-master | drivers/media/cec/platform/s5p/s5p_cec.c |
// SPDX-License-Identifier: GPL-2.0-only
/* drivers/media/platform/s5p-cec/exynos_hdmi_cecctrl.c
*
* Copyright (c) 2009, 2014 Samsung Electronics
* http://www.samsung.com/
*
* cec ftn file for Samsung TVOUT driver
*/
#include <linux/io.h>
#include <linux/device.h>
#include "exynos_hdmi_cec.h"
#include "regs-cec.h"
#define S5P_HDMI_FIN 24000000
#define CEC_DIV_RATIO 320000
#define CEC_MESSAGE_BROADCAST_MASK 0x0F
#define CEC_MESSAGE_BROADCAST 0x0F
#define CEC_FILTER_THRESHOLD 0x15
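/*
 * Program the CEC clock dividers: the 24 MHz input clock is divided down
 * via the HDMI PHY control register in the PMU and the divisor registers
 * are set so that the controller counts in 50 us ticks.
 */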
void s5p_cec_set_divider(struct s5p_cec_dev *cec)
{
u32 div_ratio, div_val;
unsigned int reg;
div_ratio = S5P_HDMI_FIN / CEC_DIV_RATIO - 1;
if (regmap_read(cec->pmu, EXYNOS_HDMI_PHY_CONTROL, ®)) {
dev_err(cec->dev, "failed to read phy control\n");
return;
}
reg = (reg & ~(0x3FF << 16)) | (div_ratio << 16);
if (regmap_write(cec->pmu, EXYNOS_HDMI_PHY_CONTROL, reg)) {
dev_err(cec->dev, "failed to write phy control\n");
return;
}
div_val = CEC_DIV_RATIO * 0.00005 - 1;
writeb(0x0, cec->reg + S5P_CEC_DIVISOR_3);
writeb(0x0, cec->reg + S5P_CEC_DIVISOR_2);
writeb(0x0, cec->reg + S5P_CEC_DIVISOR_1);
writeb(div_val, cec->reg + S5P_CEC_DIVISOR_0);
}
void s5p_cec_enable_rx(struct s5p_cec_dev *cec)
{
u8 reg;
reg = readb(cec->reg + S5P_CEC_RX_CTRL);
reg |= S5P_CEC_RX_CTRL_ENABLE;
writeb(reg, cec->reg + S5P_CEC_RX_CTRL);
}
void s5p_cec_mask_rx_interrupts(struct s5p_cec_dev *cec)
{
u8 reg;
reg = readb(cec->reg + S5P_CEC_IRQ_MASK);
reg |= S5P_CEC_IRQ_RX_DONE;
reg |= S5P_CEC_IRQ_RX_ERROR;
writeb(reg, cec->reg + S5P_CEC_IRQ_MASK);
}
void s5p_cec_unmask_rx_interrupts(struct s5p_cec_dev *cec)
{
u8 reg;
reg = readb(cec->reg + S5P_CEC_IRQ_MASK);
reg &= ~S5P_CEC_IRQ_RX_DONE;
reg &= ~S5P_CEC_IRQ_RX_ERROR;
writeb(reg, cec->reg + S5P_CEC_IRQ_MASK);
}
void s5p_cec_mask_tx_interrupts(struct s5p_cec_dev *cec)
{
u8 reg;
reg = readb(cec->reg + S5P_CEC_IRQ_MASK);
reg |= S5P_CEC_IRQ_TX_DONE;
reg |= S5P_CEC_IRQ_TX_ERROR;
writeb(reg, cec->reg + S5P_CEC_IRQ_MASK);
}
void s5p_cec_unmask_tx_interrupts(struct s5p_cec_dev *cec)
{
u8 reg;
reg = readb(cec->reg + S5P_CEC_IRQ_MASK);
reg &= ~S5P_CEC_IRQ_TX_DONE;
reg &= ~S5P_CEC_IRQ_TX_ERROR;
writeb(reg, cec->reg + S5P_CEC_IRQ_MASK);
}
void s5p_cec_reset(struct s5p_cec_dev *cec)
{
u8 reg;
writeb(S5P_CEC_RX_CTRL_RESET, cec->reg + S5P_CEC_RX_CTRL);
writeb(S5P_CEC_TX_CTRL_RESET, cec->reg + S5P_CEC_TX_CTRL);
reg = readb(cec->reg + 0xc4);
reg &= ~0x1;
writeb(reg, cec->reg + 0xc4);
}
void s5p_cec_tx_reset(struct s5p_cec_dev *cec)
{
writeb(S5P_CEC_TX_CTRL_RESET, cec->reg + S5P_CEC_TX_CTRL);
}
void s5p_cec_rx_reset(struct s5p_cec_dev *cec)
{
u8 reg;
writeb(S5P_CEC_RX_CTRL_RESET, cec->reg + S5P_CEC_RX_CTRL);
reg = readb(cec->reg + 0xc4);
reg &= ~0x1;
writeb(reg, cec->reg + 0xc4);
}
void s5p_cec_threshold(struct s5p_cec_dev *cec)
{
writeb(CEC_FILTER_THRESHOLD, cec->reg + S5P_CEC_RX_FILTER_TH);
writeb(0, cec->reg + S5P_CEC_RX_FILTER_CTRL);
}
void s5p_cec_copy_packet(struct s5p_cec_dev *cec, char *data,
size_t count, u8 retries)
{
int i = 0;
u8 reg;
while (i < count) {
writeb(data[i], cec->reg + (S5P_CEC_TX_BUFF0 + (i * 4)));
i++;
}
writeb(count, cec->reg + S5P_CEC_TX_BYTES);
reg = readb(cec->reg + S5P_CEC_TX_CTRL);
reg |= S5P_CEC_TX_CTRL_START;
reg &= ~0x70;
reg |= retries << 4;
if ((data[0] & CEC_MESSAGE_BROADCAST_MASK) == CEC_MESSAGE_BROADCAST) {
dev_dbg(cec->dev, "Broadcast");
reg |= S5P_CEC_TX_CTRL_BCAST;
} else {
dev_dbg(cec->dev, "No Broadcast");
reg &= ~S5P_CEC_TX_CTRL_BCAST;
}
writeb(reg, cec->reg + S5P_CEC_TX_CTRL);
dev_dbg(cec->dev, "cec-tx: cec count (%zu): %*ph", count,
(int)count, data);
}
void s5p_cec_set_addr(struct s5p_cec_dev *cec, u32 addr)
{
writeb(addr & 0x0F, cec->reg + S5P_CEC_LOGIC_ADDR);
}
u32 s5p_cec_get_status(struct s5p_cec_dev *cec)
{
u32 status = 0;
status = readb(cec->reg + S5P_CEC_STATUS_0) & 0xf;
status |= (readb(cec->reg + S5P_CEC_TX_STAT1) & 0xf) << 4;
status |= readb(cec->reg + S5P_CEC_STATUS_1) << 8;
status |= readb(cec->reg + S5P_CEC_STATUS_2) << 16;
status |= readb(cec->reg + S5P_CEC_STATUS_3) << 24;
dev_dbg(cec->dev, "status = 0x%x!\n", status);
return status;
}
void s5p_clr_pending_tx(struct s5p_cec_dev *cec)
{
writeb(S5P_CEC_IRQ_TX_DONE | S5P_CEC_IRQ_TX_ERROR,
cec->reg + S5P_CEC_IRQ_CLEAR);
}
void s5p_clr_pending_rx(struct s5p_cec_dev *cec)
{
writeb(S5P_CEC_IRQ_RX_DONE | S5P_CEC_IRQ_RX_ERROR,
cec->reg + S5P_CEC_IRQ_CLEAR);
}
void s5p_cec_get_rx_buf(struct s5p_cec_dev *cec, u32 size, u8 *buffer)
{
u32 i = 0;
char debug[40];
while (i < size) {
buffer[i] = readb(cec->reg + S5P_CEC_RX_BUFF0 + (i * 4));
sprintf(debug + i * 2, "%02x ", buffer[i]);
i++;
}
dev_dbg(cec->dev, "cec-rx: cec size(%d): %s", size, debug);
}
| linux-master | drivers/media/cec/platform/s5p/exynos_hdmi_cecctrl.c |
/*
* Driver for Amlogic Meson AO CEC Controller
*
* Copyright (C) 2015 Amlogic, Inc. All rights reserved
* Copyright (C) 2017 BayLibre, SAS
* Author: Neil Armstrong <[email protected]>
*
* SPDX-License-Identifier: GPL-2.0+
*/
#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/device.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/reset.h>
#include <media/cec.h>
#include <media/cec-notifier.h>
/* CEC Registers */
/*
* [2:1] cntl_clk
* - 0 = Disable clk (Power-off mode)
* - 1 = Enable gated clock (Normal mode)
* - 2 = Enable free-run clk (Debug mode)
*/
#define CEC_GEN_CNTL_REG 0x00
#define CEC_GEN_CNTL_RESET BIT(0)
#define CEC_GEN_CNTL_CLK_DISABLE 0
#define CEC_GEN_CNTL_CLK_ENABLE 1
#define CEC_GEN_CNTL_CLK_ENABLE_DBG 2
#define CEC_GEN_CNTL_CLK_CTRL_MASK GENMASK(2, 1)
/*
* [7:0] cec_reg_addr
* [15:8] cec_reg_wrdata
* [16] cec_reg_wr
* - 0 = Read
* - 1 = Write
* [23] bus free
* [31:24] cec_reg_rddata
*/
#define CEC_RW_REG 0x04
#define CEC_RW_ADDR GENMASK(7, 0)
#define CEC_RW_WR_DATA GENMASK(15, 8)
#define CEC_RW_WRITE_EN BIT(16)
#define CEC_RW_BUS_BUSY BIT(23)
#define CEC_RW_RD_DATA GENMASK(31, 24)
/*
* [1] tx intr
* [2] rx intr
*/
#define CEC_INTR_MASKN_REG 0x08
#define CEC_INTR_CLR_REG 0x0c
#define CEC_INTR_STAT_REG 0x10
#define CEC_INTR_TX BIT(1)
#define CEC_INTR_RX BIT(2)
/* CEC Commands */
#define CEC_TX_MSG_0_HEADER 0x00
#define CEC_TX_MSG_1_OPCODE 0x01
#define CEC_TX_MSG_2_OP1 0x02
#define CEC_TX_MSG_3_OP2 0x03
#define CEC_TX_MSG_4_OP3 0x04
#define CEC_TX_MSG_5_OP4 0x05
#define CEC_TX_MSG_6_OP5 0x06
#define CEC_TX_MSG_7_OP6 0x07
#define CEC_TX_MSG_8_OP7 0x08
#define CEC_TX_MSG_9_OP8 0x09
#define CEC_TX_MSG_A_OP9 0x0A
#define CEC_TX_MSG_B_OP10 0x0B
#define CEC_TX_MSG_C_OP11 0x0C
#define CEC_TX_MSG_D_OP12 0x0D
#define CEC_TX_MSG_E_OP13 0x0E
#define CEC_TX_MSG_F_OP14 0x0F
#define CEC_TX_MSG_LENGTH 0x10
#define CEC_TX_MSG_CMD 0x11
#define CEC_TX_WRITE_BUF 0x12
#define CEC_TX_CLEAR_BUF 0x13
#define CEC_RX_MSG_CMD 0x14
#define CEC_RX_CLEAR_BUF 0x15
#define CEC_LOGICAL_ADDR0 0x16
#define CEC_LOGICAL_ADDR1 0x17
#define CEC_LOGICAL_ADDR2 0x18
#define CEC_LOGICAL_ADDR3 0x19
#define CEC_LOGICAL_ADDR4 0x1A
#define CEC_CLOCK_DIV_H 0x1B
#define CEC_CLOCK_DIV_L 0x1C
#define CEC_QUIESCENT_25MS_BIT7_0 0x20
#define CEC_QUIESCENT_25MS_BIT11_8 0x21
#define CEC_STARTBITMINL2H_3MS5_BIT7_0 0x22
#define CEC_STARTBITMINL2H_3MS5_BIT8 0x23
#define CEC_STARTBITMAXL2H_3MS9_BIT7_0 0x24
#define CEC_STARTBITMAXL2H_3MS9_BIT8 0x25
#define CEC_STARTBITMINH_0MS6_BIT7_0 0x26
#define CEC_STARTBITMINH_0MS6_BIT8 0x27
#define CEC_STARTBITMAXH_1MS0_BIT7_0 0x28
#define CEC_STARTBITMAXH_1MS0_BIT8 0x29
#define CEC_STARTBITMINTOT_4MS3_BIT7_0 0x2A
#define CEC_STARTBITMINTOT_4MS3_BIT9_8 0x2B
#define CEC_STARTBITMAXTOT_4MS7_BIT7_0 0x2C
#define CEC_STARTBITMAXTOT_4MS7_BIT9_8 0x2D
#define CEC_LOGIC1MINL2H_0MS4_BIT7_0 0x2E
#define CEC_LOGIC1MINL2H_0MS4_BIT8 0x2F
#define CEC_LOGIC1MAXL2H_0MS8_BIT7_0 0x30
#define CEC_LOGIC1MAXL2H_0MS8_BIT8 0x31
#define CEC_LOGIC0MINL2H_1MS3_BIT7_0 0x32
#define CEC_LOGIC0MINL2H_1MS3_BIT8 0x33
#define CEC_LOGIC0MAXL2H_1MS7_BIT7_0 0x34
#define CEC_LOGIC0MAXL2H_1MS7_BIT8 0x35
#define CEC_LOGICMINTOTAL_2MS05_BIT7_0 0x36
#define CEC_LOGICMINTOTAL_2MS05_BIT9_8 0x37
#define CEC_LOGICMAXHIGH_2MS8_BIT7_0 0x38
#define CEC_LOGICMAXHIGH_2MS8_BIT8 0x39
#define CEC_LOGICERRLOW_3MS4_BIT7_0 0x3A
#define CEC_LOGICERRLOW_3MS4_BIT8 0x3B
#define CEC_NOMSMPPOINT_1MS05 0x3C
#define CEC_DELCNTR_LOGICERR 0x3E
#define CEC_TXTIME_17MS_BIT7_0 0x40
#define CEC_TXTIME_17MS_BIT10_8 0x41
#define CEC_TXTIME_2BIT_BIT7_0 0x42
#define CEC_TXTIME_2BIT_BIT10_8 0x43
#define CEC_TXTIME_4BIT_BIT7_0 0x44
#define CEC_TXTIME_4BIT_BIT10_8 0x45
#define CEC_STARTBITNOML2H_3MS7_BIT7_0 0x46
#define CEC_STARTBITNOML2H_3MS7_BIT8 0x47
#define CEC_STARTBITNOMH_0MS8_BIT7_0 0x48
#define CEC_STARTBITNOMH_0MS8_BIT8 0x49
#define CEC_LOGIC1NOML2H_0MS6_BIT7_0 0x4A
#define CEC_LOGIC1NOML2H_0MS6_BIT8 0x4B
#define CEC_LOGIC0NOML2H_1MS5_BIT7_0 0x4C
#define CEC_LOGIC0NOML2H_1MS5_BIT8 0x4D
#define CEC_LOGIC1NOMH_1MS8_BIT7_0 0x4E
#define CEC_LOGIC1NOMH_1MS8_BIT8 0x4F
#define CEC_LOGIC0NOMH_0MS9_BIT7_0 0x50
#define CEC_LOGIC0NOMH_0MS9_BIT8 0x51
#define CEC_LOGICERRLOW_3MS6_BIT7_0 0x52
#define CEC_LOGICERRLOW_3MS6_BIT8 0x53
#define CEC_CHKCONTENTION_0MS1 0x54
#define CEC_PREPARENXTBIT_0MS05_BIT7_0 0x56
#define CEC_PREPARENXTBIT_0MS05_BIT8 0x57
#define CEC_NOMSMPACKPOINT_0MS45 0x58
#define CEC_ACK0NOML2H_1MS5_BIT7_0 0x5A
#define CEC_ACK0NOML2H_1MS5_BIT8 0x5B
#define CEC_BUGFIX_DISABLE_0 0x60
#define CEC_BUGFIX_DISABLE_1 0x61
#define CEC_RX_MSG_0_HEADER 0x80
#define CEC_RX_MSG_1_OPCODE 0x81
#define CEC_RX_MSG_2_OP1 0x82
#define CEC_RX_MSG_3_OP2 0x83
#define CEC_RX_MSG_4_OP3 0x84
#define CEC_RX_MSG_5_OP4 0x85
#define CEC_RX_MSG_6_OP5 0x86
#define CEC_RX_MSG_7_OP6 0x87
#define CEC_RX_MSG_8_OP7 0x88
#define CEC_RX_MSG_9_OP8 0x89
#define CEC_RX_MSG_A_OP9 0x8A
#define CEC_RX_MSG_B_OP10 0x8B
#define CEC_RX_MSG_C_OP11 0x8C
#define CEC_RX_MSG_D_OP12 0x8D
#define CEC_RX_MSG_E_OP13 0x8E
#define CEC_RX_MSG_F_OP14 0x8F
#define CEC_RX_MSG_LENGTH 0x90
#define CEC_RX_MSG_STATUS 0x91
#define CEC_RX_NUM_MSG 0x92
#define CEC_TX_MSG_STATUS 0x93
#define CEC_TX_NUM_MSG 0x94
/* CEC_TX_MSG_CMD definition */
#define TX_NO_OP 0 /* No transaction */
#define TX_REQ_CURRENT 1 /* Transmit earliest message in buffer */
#define TX_ABORT 2 /* Abort transmitting earliest message */
#define TX_REQ_NEXT 3 /* Overwrite earliest msg, transmit next */
/* tx_msg_status definition */
#define TX_IDLE 0 /* No transaction */
#define TX_BUSY 1 /* Transmitter is busy */
#define TX_DONE 2 /* Message successfully transmitted */
#define TX_ERROR 3 /* Message transmitted with error */
/* rx_msg_cmd */
#define RX_NO_OP 0 /* No transaction */
#define RX_ACK_CURRENT 1 /* Read earliest message in buffer */
#define RX_DISABLE 2 /* Disable receiving latest message */
#define RX_ACK_NEXT 3 /* Clear earliest msg, read next */
/* rx_msg_status */
#define RX_IDLE 0 /* No transaction */
#define RX_BUSY 1 /* Receiver is busy */
#define RX_DONE 2 /* Message has been received successfully */
#define RX_ERROR 3 /* Message has been received with error */
/* RX_CLEAR_BUF options */
#define CLEAR_START 1
#define CLEAR_STOP 0
/* CEC_LOGICAL_ADDRx options */
#define LOGICAL_ADDR_MASK 0xf
#define LOGICAL_ADDR_VALID BIT(4)
#define LOGICAL_ADDR_DISABLE 0
#define CEC_CLK_RATE 32768
struct meson_ao_cec_device {
struct platform_device *pdev;
void __iomem *base;
struct clk *core;
spinlock_t cec_reg_lock;
struct cec_notifier *notify;
struct cec_adapter *adap;
struct cec_msg rx_msg;
};
#define writel_bits_relaxed(mask, val, addr) \
writel_relaxed((readl_relaxed(addr) & ~(mask)) | (val), addr)
static inline int meson_ao_cec_wait_busy(struct meson_ao_cec_device *ao_cec)
{
ktime_t timeout = ktime_add_us(ktime_get(), 5000);
while (readl_relaxed(ao_cec->base + CEC_RW_REG) & CEC_RW_BUS_BUSY) {
if (ktime_compare(ktime_get(), timeout) > 0)
return -ETIMEDOUT;
}
return 0;
}
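/*
 * The register access helpers below chain errors through the optional
 * 'res' pointer: if a previous call already failed (*res != 0) they do
 * nothing, so a sequence of accesses can be checked once at the end.
 */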
static void meson_ao_cec_read(struct meson_ao_cec_device *ao_cec,
unsigned long address, u8 *data,
int *res)
{
unsigned long flags;
u32 reg = FIELD_PREP(CEC_RW_ADDR, address);
int ret = 0;
if (res && *res)
return;
spin_lock_irqsave(&ao_cec->cec_reg_lock, flags);
ret = meson_ao_cec_wait_busy(ao_cec);
if (ret)
goto read_out;
writel_relaxed(reg, ao_cec->base + CEC_RW_REG);
ret = meson_ao_cec_wait_busy(ao_cec);
if (ret)
goto read_out;
*data = FIELD_GET(CEC_RW_RD_DATA,
readl_relaxed(ao_cec->base + CEC_RW_REG));
read_out:
spin_unlock_irqrestore(&ao_cec->cec_reg_lock, flags);
if (res)
*res = ret;
}
static void meson_ao_cec_write(struct meson_ao_cec_device *ao_cec,
unsigned long address, u8 data,
int *res)
{
unsigned long flags;
u32 reg = FIELD_PREP(CEC_RW_ADDR, address) |
FIELD_PREP(CEC_RW_WR_DATA, data) |
CEC_RW_WRITE_EN;
int ret = 0;
if (res && *res)
return;
spin_lock_irqsave(&ao_cec->cec_reg_lock, flags);
ret = meson_ao_cec_wait_busy(ao_cec);
if (ret)
goto write_out;
writel_relaxed(reg, ao_cec->base + CEC_RW_REG);
write_out:
spin_unlock_irqrestore(&ao_cec->cec_reg_lock, flags);
if (res)
*res = ret;
}
static inline void meson_ao_cec_irq_setup(struct meson_ao_cec_device *ao_cec,
bool enable)
{
u32 cfg = CEC_INTR_TX | CEC_INTR_RX;
writel_bits_relaxed(cfg, enable ? cfg : 0,
ao_cec->base + CEC_INTR_MASKN_REG);
}
static inline int meson_ao_cec_clear(struct meson_ao_cec_device *ao_cec)
{
int ret = 0;
meson_ao_cec_write(ao_cec, CEC_RX_MSG_CMD, RX_DISABLE, &ret);
meson_ao_cec_write(ao_cec, CEC_TX_MSG_CMD, TX_ABORT, &ret);
meson_ao_cec_write(ao_cec, CEC_RX_CLEAR_BUF, 1, &ret);
meson_ao_cec_write(ao_cec, CEC_TX_CLEAR_BUF, 1, &ret);
if (ret)
return ret;
udelay(100);
meson_ao_cec_write(ao_cec, CEC_RX_CLEAR_BUF, 0, &ret);
meson_ao_cec_write(ao_cec, CEC_TX_CLEAR_BUF, 0, &ret);
if (ret)
return ret;
udelay(100);
meson_ao_cec_write(ao_cec, CEC_RX_MSG_CMD, RX_NO_OP, &ret);
meson_ao_cec_write(ao_cec, CEC_TX_MSG_CMD, TX_NO_OP, &ret);
return ret;
}
static int meson_ao_cec_arbit_bit_time_set(struct meson_ao_cec_device *ao_cec,
unsigned int bit_set,
unsigned int time_set)
{
int ret = 0;
switch (bit_set) {
case CEC_SIGNAL_FREE_TIME_RETRY:
meson_ao_cec_write(ao_cec, CEC_TXTIME_4BIT_BIT7_0,
time_set & 0xff, &ret);
meson_ao_cec_write(ao_cec, CEC_TXTIME_4BIT_BIT10_8,
(time_set >> 8) & 0x7, &ret);
break;
case CEC_SIGNAL_FREE_TIME_NEW_INITIATOR:
meson_ao_cec_write(ao_cec, CEC_TXTIME_2BIT_BIT7_0,
time_set & 0xff, &ret);
meson_ao_cec_write(ao_cec, CEC_TXTIME_2BIT_BIT10_8,
(time_set >> 8) & 0x7, &ret);
break;
case CEC_SIGNAL_FREE_TIME_NEXT_XFER:
meson_ao_cec_write(ao_cec, CEC_TXTIME_17MS_BIT7_0,
time_set & 0xff, &ret);
meson_ao_cec_write(ao_cec, CEC_TXTIME_17MS_BIT10_8,
(time_set >> 8) & 0x7, &ret);
break;
}
return ret;
}
static irqreturn_t meson_ao_cec_irq(int irq, void *data)
{
struct meson_ao_cec_device *ao_cec = data;
u32 stat = readl_relaxed(ao_cec->base + CEC_INTR_STAT_REG);
if (stat)
return IRQ_WAKE_THREAD;
return IRQ_NONE;
}
static void meson_ao_cec_irq_tx(struct meson_ao_cec_device *ao_cec)
{
unsigned long tx_status = 0;
u8 stat;
int ret = 0;
meson_ao_cec_read(ao_cec, CEC_TX_MSG_STATUS, &stat, &ret);
if (ret)
goto tx_reg_err;
switch (stat) {
case TX_DONE:
tx_status = CEC_TX_STATUS_OK;
break;
case TX_BUSY:
tx_status = CEC_TX_STATUS_ARB_LOST;
break;
case TX_IDLE:
tx_status = CEC_TX_STATUS_LOW_DRIVE;
break;
case TX_ERROR:
default:
tx_status = CEC_TX_STATUS_NACK;
break;
}
/* Clear Interruption */
writel_relaxed(CEC_INTR_TX, ao_cec->base + CEC_INTR_CLR_REG);
/* Stop TX */
meson_ao_cec_write(ao_cec, CEC_TX_MSG_CMD, TX_NO_OP, &ret);
if (ret)
goto tx_reg_err;
cec_transmit_attempt_done(ao_cec->adap, tx_status);
return;
tx_reg_err:
cec_transmit_attempt_done(ao_cec->adap, CEC_TX_STATUS_ERROR);
}
static void meson_ao_cec_irq_rx(struct meson_ao_cec_device *ao_cec)
{
int i, ret = 0;
u8 reg;
meson_ao_cec_read(ao_cec, CEC_RX_MSG_STATUS, &reg, &ret);
if (reg != RX_DONE)
goto rx_out;
meson_ao_cec_read(ao_cec, CEC_RX_NUM_MSG, &reg, &ret);
if (reg != 1)
goto rx_out;
meson_ao_cec_read(ao_cec, CEC_RX_MSG_LENGTH, &reg, &ret);
ao_cec->rx_msg.len = reg + 1;
if (ao_cec->rx_msg.len > CEC_MAX_MSG_SIZE)
ao_cec->rx_msg.len = CEC_MAX_MSG_SIZE;
for (i = 0; i < ao_cec->rx_msg.len; i++) {
u8 byte;
meson_ao_cec_read(ao_cec, CEC_RX_MSG_0_HEADER + i, &byte, &ret);
ao_cec->rx_msg.msg[i] = byte;
}
if (ret)
goto rx_out;
cec_received_msg(ao_cec->adap, &ao_cec->rx_msg);
rx_out:
/* Clear Interruption */
writel_relaxed(CEC_INTR_RX, ao_cec->base + CEC_INTR_CLR_REG);
/* Ack RX message */
meson_ao_cec_write(ao_cec, CEC_RX_MSG_CMD, RX_ACK_CURRENT, &ret);
meson_ao_cec_write(ao_cec, CEC_RX_MSG_CMD, RX_NO_OP, &ret);
/* Clear RX buffer */
meson_ao_cec_write(ao_cec, CEC_RX_CLEAR_BUF, CLEAR_START, &ret);
meson_ao_cec_write(ao_cec, CEC_RX_CLEAR_BUF, CLEAR_STOP, &ret);
}
static irqreturn_t meson_ao_cec_irq_thread(int irq, void *data)
{
struct meson_ao_cec_device *ao_cec = data;
u32 stat = readl_relaxed(ao_cec->base + CEC_INTR_STAT_REG);
if (stat & CEC_INTR_TX)
meson_ao_cec_irq_tx(ao_cec);
if (stat & CEC_INTR_RX)
meson_ao_cec_irq_rx(ao_cec);
return IRQ_HANDLED;
}
static int meson_ao_cec_set_log_addr(struct cec_adapter *adap, u8 logical_addr)
{
struct meson_ao_cec_device *ao_cec = adap->priv;
int ret = 0;
meson_ao_cec_write(ao_cec, CEC_LOGICAL_ADDR0,
LOGICAL_ADDR_DISABLE, &ret);
if (ret)
return ret;
ret = meson_ao_cec_clear(ao_cec);
if (ret)
return ret;
if (logical_addr == CEC_LOG_ADDR_INVALID)
return 0;
meson_ao_cec_write(ao_cec, CEC_LOGICAL_ADDR0,
logical_addr & LOGICAL_ADDR_MASK, &ret);
if (ret)
return ret;
udelay(100);
meson_ao_cec_write(ao_cec, CEC_LOGICAL_ADDR0,
(logical_addr & LOGICAL_ADDR_MASK) |
LOGICAL_ADDR_VALID, &ret);
return ret;
}
static int meson_ao_cec_transmit(struct cec_adapter *adap, u8 attempts,
u32 signal_free_time, struct cec_msg *msg)
{
struct meson_ao_cec_device *ao_cec = adap->priv;
int i, ret = 0;
u8 reg;
meson_ao_cec_read(ao_cec, CEC_TX_MSG_STATUS, &reg, &ret);
if (ret)
return ret;
if (reg == TX_BUSY) {
dev_dbg(&ao_cec->pdev->dev, "%s: busy TX: aborting\n",
__func__);
meson_ao_cec_write(ao_cec, CEC_TX_MSG_CMD, TX_ABORT, &ret);
}
for (i = 0; i < msg->len; i++) {
meson_ao_cec_write(ao_cec, CEC_TX_MSG_0_HEADER + i,
msg->msg[i], &ret);
}
meson_ao_cec_write(ao_cec, CEC_TX_MSG_LENGTH, msg->len - 1, &ret);
meson_ao_cec_write(ao_cec, CEC_TX_MSG_CMD, TX_REQ_CURRENT, &ret);
return ret;
}
static int meson_ao_cec_adap_enable(struct cec_adapter *adap, bool enable)
{
struct meson_ao_cec_device *ao_cec = adap->priv;
int ret;
meson_ao_cec_irq_setup(ao_cec, false);
writel_bits_relaxed(CEC_GEN_CNTL_RESET, CEC_GEN_CNTL_RESET,
ao_cec->base + CEC_GEN_CNTL_REG);
if (!enable)
return 0;
/* Enable gated clock (Normal mode). */
writel_bits_relaxed(CEC_GEN_CNTL_CLK_CTRL_MASK,
FIELD_PREP(CEC_GEN_CNTL_CLK_CTRL_MASK,
CEC_GEN_CNTL_CLK_ENABLE),
ao_cec->base + CEC_GEN_CNTL_REG);
udelay(100);
/* Release Reset */
writel_bits_relaxed(CEC_GEN_CNTL_RESET, 0,
ao_cec->base + CEC_GEN_CNTL_REG);
/* Clear buffers */
ret = meson_ao_cec_clear(ao_cec);
if (ret)
return ret;
/* CEC arbitration 3/5/7 bit time set. */
ret = meson_ao_cec_arbit_bit_time_set(ao_cec,
CEC_SIGNAL_FREE_TIME_RETRY,
0x118);
if (ret)
return ret;
ret = meson_ao_cec_arbit_bit_time_set(ao_cec,
CEC_SIGNAL_FREE_TIME_NEW_INITIATOR,
0x000);
if (ret)
return ret;
ret = meson_ao_cec_arbit_bit_time_set(ao_cec,
CEC_SIGNAL_FREE_TIME_NEXT_XFER,
0x2aa);
if (ret)
return ret;
meson_ao_cec_irq_setup(ao_cec, true);
return 0;
}
static const struct cec_adap_ops meson_ao_cec_ops = {
.adap_enable = meson_ao_cec_adap_enable,
.adap_log_addr = meson_ao_cec_set_log_addr,
.adap_transmit = meson_ao_cec_transmit,
};
static int meson_ao_cec_probe(struct platform_device *pdev)
{
struct meson_ao_cec_device *ao_cec;
struct device *hdmi_dev;
int ret, irq;
hdmi_dev = cec_notifier_parse_hdmi_phandle(&pdev->dev);
if (IS_ERR(hdmi_dev))
return PTR_ERR(hdmi_dev);
ao_cec = devm_kzalloc(&pdev->dev, sizeof(*ao_cec), GFP_KERNEL);
if (!ao_cec)
return -ENOMEM;
spin_lock_init(&ao_cec->cec_reg_lock);
ao_cec->adap = cec_allocate_adapter(&meson_ao_cec_ops, ao_cec,
"meson_ao_cec",
CEC_CAP_DEFAULTS |
CEC_CAP_CONNECTOR_INFO,
1); /* Use 1 for now */
if (IS_ERR(ao_cec->adap))
return PTR_ERR(ao_cec->adap);
ao_cec->adap->owner = THIS_MODULE;
ao_cec->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(ao_cec->base)) {
ret = PTR_ERR(ao_cec->base);
goto out_probe_adapter;
}
irq = platform_get_irq(pdev, 0);
ret = devm_request_threaded_irq(&pdev->dev, irq,
meson_ao_cec_irq,
meson_ao_cec_irq_thread,
0, NULL, ao_cec);
if (ret) {
dev_err(&pdev->dev, "irq request failed\n");
goto out_probe_adapter;
}
ao_cec->core = devm_clk_get(&pdev->dev, "core");
if (IS_ERR(ao_cec->core)) {
dev_err(&pdev->dev, "core clock request failed\n");
ret = PTR_ERR(ao_cec->core);
goto out_probe_adapter;
}
ret = clk_prepare_enable(ao_cec->core);
if (ret) {
dev_err(&pdev->dev, "core clock enable failed\n");
goto out_probe_adapter;
}
ret = clk_set_rate(ao_cec->core, CEC_CLK_RATE);
if (ret) {
dev_err(&pdev->dev, "core clock set rate failed\n");
goto out_probe_clk;
}
device_reset_optional(&pdev->dev);
ao_cec->pdev = pdev;
platform_set_drvdata(pdev, ao_cec);
ao_cec->notify = cec_notifier_cec_adap_register(hdmi_dev, NULL,
ao_cec->adap);
if (!ao_cec->notify) {
ret = -ENOMEM;
goto out_probe_clk;
}
ret = cec_register_adapter(ao_cec->adap, &pdev->dev);
if (ret < 0)
goto out_probe_notify;
/* Setup Hardware */
writel_relaxed(CEC_GEN_CNTL_RESET,
ao_cec->base + CEC_GEN_CNTL_REG);
return 0;
out_probe_notify:
cec_notifier_cec_adap_unregister(ao_cec->notify, ao_cec->adap);
out_probe_clk:
clk_disable_unprepare(ao_cec->core);
out_probe_adapter:
cec_delete_adapter(ao_cec->adap);
dev_err(&pdev->dev, "CEC controller registration failed\n");
return ret;
}
static void meson_ao_cec_remove(struct platform_device *pdev)
{
struct meson_ao_cec_device *ao_cec = platform_get_drvdata(pdev);
clk_disable_unprepare(ao_cec->core);
cec_notifier_cec_adap_unregister(ao_cec->notify, ao_cec->adap);
cec_unregister_adapter(ao_cec->adap);
}
static const struct of_device_id meson_ao_cec_of_match[] = {
{ .compatible = "amlogic,meson-gx-ao-cec", },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, meson_ao_cec_of_match);
static struct platform_driver meson_ao_cec_driver = {
.probe = meson_ao_cec_probe,
.remove_new = meson_ao_cec_remove,
.driver = {
.name = "meson-ao-cec",
.of_match_table = meson_ao_cec_of_match,
},
};
module_platform_driver(meson_ao_cec_driver);
MODULE_DESCRIPTION("Meson AO CEC Controller driver");
MODULE_AUTHOR("Neil Armstrong <[email protected]>");
MODULE_LICENSE("GPL");
| linux-master | drivers/media/cec/platform/meson/ao-cec.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* Driver for Amlogic Meson AO CEC G12A Controller
*
* Copyright (C) 2017 Amlogic, Inc. All rights reserved
* Copyright (C) 2019 BayLibre, SAS
* Author: Neil Armstrong <[email protected]>
*/
#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/device.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/reset.h>
#include <linux/slab.h>
#include <linux/regmap.h>
#include <media/cec.h>
#include <media/cec-notifier.h>
#include <linux/clk-provider.h>
/* CEC Registers */
#define CECB_CLK_CNTL_REG0 0x00
#define CECB_CLK_CNTL_N1 GENMASK(11, 0)
#define CECB_CLK_CNTL_N2 GENMASK(23, 12)
#define CECB_CLK_CNTL_DUAL_EN BIT(28)
#define CECB_CLK_CNTL_OUTPUT_EN BIT(30)
#define CECB_CLK_CNTL_INPUT_EN BIT(31)
#define CECB_CLK_CNTL_REG1 0x04
#define CECB_CLK_CNTL_M1 GENMASK(11, 0)
#define CECB_CLK_CNTL_M2 GENMASK(23, 12)
#define CECB_CLK_CNTL_BYPASS_EN BIT(24)
/*
* [14:12] Filter_del. For glitch-filtering CEC line, ignore signal
* change pulse width < filter_del * T(filter_tick) * 3.
* [9:8] Filter_tick_sel: Select which periodical pulse for
* glitch-filtering CEC line signal.
* - 0=Use T(xtal)*3 = 125ns;
* - 1=Use once-per-1us pulse;
* - 2=Use once-per-10us pulse;
* - 3=Use once-per-100us pulse.
* [3] Sysclk_en. 0=Disable system clock; 1=Enable system clock.
* [2:1] cntl_clk
* - 0 = Disable clk (Power-off mode)
* - 1 = Enable gated clock (Normal mode)
* - 2 = Enable free-run clk (Debug mode)
* [0] SW_RESET 1=Apply reset; 0=No reset.
*/
#define CECB_GEN_CNTL_REG 0x08
#define CECB_GEN_CNTL_RESET BIT(0)
#define CECB_GEN_CNTL_CLK_DISABLE 0
#define CECB_GEN_CNTL_CLK_ENABLE 1
#define CECB_GEN_CNTL_CLK_ENABLE_DBG 2
#define CECB_GEN_CNTL_CLK_CTRL_MASK GENMASK(2, 1)
#define CECB_GEN_CNTL_SYS_CLK_EN BIT(3)
#define CECB_GEN_CNTL_FILTER_TICK_125NS 0
#define CECB_GEN_CNTL_FILTER_TICK_1US 1
#define CECB_GEN_CNTL_FILTER_TICK_10US 2
#define CECB_GEN_CNTL_FILTER_TICK_100US 3
#define CECB_GEN_CNTL_FILTER_TICK_SEL GENMASK(9, 8)
#define CECB_GEN_CNTL_FILTER_DEL GENMASK(14, 12)
/*
* [7:0] cec_reg_addr
* [15:8] cec_reg_wrdata
* [16] cec_reg_wr
* - 0 = Read
* - 1 = Write
* [31:24] cec_reg_rddata
*/
#define CECB_RW_REG 0x0c
#define CECB_RW_ADDR GENMASK(7, 0)
#define CECB_RW_WR_DATA GENMASK(15, 8)
#define CECB_RW_WRITE_EN BIT(16)
#define CECB_RW_BUS_BUSY BIT(23)
#define CECB_RW_RD_DATA GENMASK(31, 24)
/*
* [0] DONE Interrupt
* [1] End Of Message Interrupt
* [2] Not Acknowledge Interrupt
* [3] Arbitration Loss Interrupt
* [4] Initiator Error Interrupt
* [5] Follower Error Interrupt
* [6] Wake-Up Interrupt
*/
#define CECB_INTR_MASKN_REG 0x10
#define CECB_INTR_CLR_REG 0x14
#define CECB_INTR_STAT_REG 0x18
#define CECB_INTR_DONE BIT(0)
#define CECB_INTR_EOM BIT(1)
#define CECB_INTR_NACK BIT(2)
#define CECB_INTR_ARB_LOSS BIT(3)
#define CECB_INTR_INITIATOR_ERR BIT(4)
#define CECB_INTR_FOLLOWER_ERR BIT(5)
#define CECB_INTR_WAKE_UP BIT(6)
/* CEC Commands */
#define CECB_CTRL 0x00
#define CECB_CTRL_SEND BIT(0)
#define CECB_CTRL_TYPE GENMASK(2, 1)
#define CECB_CTRL_TYPE_RETRY 0
#define CECB_CTRL_TYPE_NEW 1
#define CECB_CTRL_TYPE_NEXT 2
#define CECB_CTRL2 0x01
#define CECB_CTRL2_RISE_DEL_MAX GENMASK(4, 0)
#define CECB_INTR_MASK 0x02
#define CECB_LADD_LOW 0x05
#define CECB_LADD_HIGH 0x06
#define CECB_TX_CNT 0x07
#define CECB_RX_CNT 0x08
#define CECB_STAT0 0x09
#define CECB_TX_DATA00 0x10
#define CECB_TX_DATA01 0x11
#define CECB_TX_DATA02 0x12
#define CECB_TX_DATA03 0x13
#define CECB_TX_DATA04 0x14
#define CECB_TX_DATA05 0x15
#define CECB_TX_DATA06 0x16
#define CECB_TX_DATA07 0x17
#define CECB_TX_DATA08 0x18
#define CECB_TX_DATA09 0x19
#define CECB_TX_DATA10 0x1A
#define CECB_TX_DATA11 0x1B
#define CECB_TX_DATA12 0x1C
#define CECB_TX_DATA13 0x1D
#define CECB_TX_DATA14 0x1E
#define CECB_TX_DATA15 0x1F
#define CECB_RX_DATA00 0x20
#define CECB_RX_DATA01 0x21
#define CECB_RX_DATA02 0x22
#define CECB_RX_DATA03 0x23
#define CECB_RX_DATA04 0x24
#define CECB_RX_DATA05 0x25
#define CECB_RX_DATA06 0x26
#define CECB_RX_DATA07 0x27
#define CECB_RX_DATA08 0x28
#define CECB_RX_DATA09 0x29
#define CECB_RX_DATA10 0x2A
#define CECB_RX_DATA11 0x2B
#define CECB_RX_DATA12 0x2C
#define CECB_RX_DATA13 0x2D
#define CECB_RX_DATA14 0x2E
#define CECB_RX_DATA15 0x2F
#define CECB_LOCK_BUF 0x30
#define CECB_LOCK_BUF_EN BIT(0)
#define CECB_WAKEUPCTRL 0x31
struct meson_ao_cec_g12a_data {
/* Setup the internal CECB_CTRL2 register */
bool ctrl2_setup;
};
struct meson_ao_cec_g12a_device {
struct platform_device *pdev;
struct regmap *regmap;
struct regmap *regmap_cec;
spinlock_t cec_reg_lock;
struct cec_notifier *notify;
struct cec_adapter *adap;
struct cec_msg rx_msg;
struct clk *oscin;
struct clk *core;
const struct meson_ao_cec_g12a_data *data;
};
static const struct regmap_config meson_ao_cec_g12a_regmap_conf = {
.reg_bits = 8,
.val_bits = 32,
.reg_stride = 4,
.max_register = CECB_INTR_STAT_REG,
};
/*
* The AO-CECB embeds a dual divider to generate a more precise
* 32,768Hz clock for the CEC core.
* ______ ______
* | | | |
* ______ | Div1 |-| Cnt1 | ______
* | | /|______| |______|\ | |
* Xtal-->| Gate |---| ______ ______ X-X--| Gate |-->
* |______| | \| | | |/ | |______|
* | | Div2 |-| Cnt2 | |
* | |______| |______| |
* |_______________________|
*
* The dividing can be switched to single or dual, with a counter
* for each divider to set when the switching is done.
* The entire dividing mechanism can also be bypassed.
*/
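/*
 * Assuming the usual 24MHz xtal input (not guaranteed by this file),
 * the values programmed in the enable callback below select /733 for
 * 8 output cycles (N1/M1) and /732 for 11 cycles (N2/M2), giving an
 * average division of (733 * 8 + 732 * 11) / (8 + 11) ~= 732.42,
 * i.e. roughly the targeted 32768Hz.
 */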
struct meson_ao_cec_g12a_dualdiv_clk {
struct clk_hw hw;
struct regmap *regmap;
};
#define hw_to_meson_ao_cec_g12a_dualdiv_clk(_hw) \
container_of(_hw, struct meson_ao_cec_g12a_dualdiv_clk, hw)
static unsigned long
meson_ao_cec_g12a_dualdiv_clk_recalc_rate(struct clk_hw *hw,
unsigned long parent_rate)
{
struct meson_ao_cec_g12a_dualdiv_clk *dualdiv_clk =
hw_to_meson_ao_cec_g12a_dualdiv_clk(hw);
unsigned long n1;
u32 reg0, reg1;
regmap_read(dualdiv_clk->regmap, CECB_CLK_CNTL_REG0, &reg0);
regmap_read(dualdiv_clk->regmap, CECB_CLK_CNTL_REG1, &reg1);
if (reg1 & CECB_CLK_CNTL_BYPASS_EN)
return parent_rate;
if (reg0 & CECB_CLK_CNTL_DUAL_EN) {
unsigned long n2, m1, m2, f1, f2, p1, p2;
n1 = FIELD_GET(CECB_CLK_CNTL_N1, reg0) + 1;
n2 = FIELD_GET(CECB_CLK_CNTL_N2, reg0) + 1;
m1 = FIELD_GET(CECB_CLK_CNTL_M1, reg1) + 1;
m2 = FIELD_GET(CECB_CLK_CNTL_M2, reg1) + 1;
f1 = DIV_ROUND_CLOSEST(parent_rate, n1);
f2 = DIV_ROUND_CLOSEST(parent_rate, n2);
p1 = DIV_ROUND_CLOSEST(100000000 * m1, f1 * (m1 + m2));
p2 = DIV_ROUND_CLOSEST(100000000 * m2, f2 * (m1 + m2));
return DIV_ROUND_UP(100000000, p1 + p2);
}
n1 = FIELD_GET(CECB_CLK_CNTL_N1, reg0) + 1;
return DIV_ROUND_CLOSEST(parent_rate, n1);
}
static int meson_ao_cec_g12a_dualdiv_clk_enable(struct clk_hw *hw)
{
struct meson_ao_cec_g12a_dualdiv_clk *dualdiv_clk =
hw_to_meson_ao_cec_g12a_dualdiv_clk(hw);
/* Disable Input & Output */
regmap_update_bits(dualdiv_clk->regmap, CECB_CLK_CNTL_REG0,
CECB_CLK_CNTL_INPUT_EN | CECB_CLK_CNTL_OUTPUT_EN,
0);
/* Set N1 & N2 */
regmap_update_bits(dualdiv_clk->regmap, CECB_CLK_CNTL_REG0,
CECB_CLK_CNTL_N1,
FIELD_PREP(CECB_CLK_CNTL_N1, 733 - 1));
regmap_update_bits(dualdiv_clk->regmap, CECB_CLK_CNTL_REG0,
CECB_CLK_CNTL_N2,
FIELD_PREP(CECB_CLK_CNTL_N2, 732 - 1));
/* Set M1 & M2 */
regmap_update_bits(dualdiv_clk->regmap, CECB_CLK_CNTL_REG1,
CECB_CLK_CNTL_M1,
FIELD_PREP(CECB_CLK_CNTL_M1, 8 - 1));
regmap_update_bits(dualdiv_clk->regmap, CECB_CLK_CNTL_REG1,
CECB_CLK_CNTL_M2,
FIELD_PREP(CECB_CLK_CNTL_M2, 11 - 1));
/* Enable Dual divisor */
regmap_update_bits(dualdiv_clk->regmap, CECB_CLK_CNTL_REG0,
CECB_CLK_CNTL_DUAL_EN, CECB_CLK_CNTL_DUAL_EN);
/* Disable divisor bypass */
regmap_update_bits(dualdiv_clk->regmap, CECB_CLK_CNTL_REG1,
CECB_CLK_CNTL_BYPASS_EN, 0);
/* Enable Input & Output */
regmap_update_bits(dualdiv_clk->regmap, CECB_CLK_CNTL_REG0,
CECB_CLK_CNTL_INPUT_EN | CECB_CLK_CNTL_OUTPUT_EN,
CECB_CLK_CNTL_INPUT_EN | CECB_CLK_CNTL_OUTPUT_EN);
return 0;
}
static void meson_ao_cec_g12a_dualdiv_clk_disable(struct clk_hw *hw)
{
struct meson_ao_cec_g12a_dualdiv_clk *dualdiv_clk =
hw_to_meson_ao_cec_g12a_dualdiv_clk(hw);
regmap_update_bits(dualdiv_clk->regmap, CECB_CLK_CNTL_REG0,
CECB_CLK_CNTL_INPUT_EN | CECB_CLK_CNTL_OUTPUT_EN,
0);
}
static int meson_ao_cec_g12a_dualdiv_clk_is_enabled(struct clk_hw *hw)
{
struct meson_ao_cec_g12a_dualdiv_clk *dualdiv_clk =
hw_to_meson_ao_cec_g12a_dualdiv_clk(hw);
int val;
regmap_read(dualdiv_clk->regmap, CECB_CLK_CNTL_REG0, &val);
return !!(val & (CECB_CLK_CNTL_INPUT_EN | CECB_CLK_CNTL_OUTPUT_EN));
}
static const struct clk_ops meson_ao_cec_g12a_dualdiv_clk_ops = {
.recalc_rate = meson_ao_cec_g12a_dualdiv_clk_recalc_rate,
.is_enabled = meson_ao_cec_g12a_dualdiv_clk_is_enabled,
.enable = meson_ao_cec_g12a_dualdiv_clk_enable,
.disable = meson_ao_cec_g12a_dualdiv_clk_disable,
};
static int meson_ao_cec_g12a_setup_clk(struct meson_ao_cec_g12a_device *ao_cec)
{
struct meson_ao_cec_g12a_dualdiv_clk *dualdiv_clk;
struct device *dev = &ao_cec->pdev->dev;
struct clk_init_data init;
const char *parent_name;
struct clk *clk;
char *name;
dualdiv_clk = devm_kzalloc(dev, sizeof(*dualdiv_clk), GFP_KERNEL);
if (!dualdiv_clk)
return -ENOMEM;
name = kasprintf(GFP_KERNEL, "%s#dualdiv_clk", dev_name(dev));
if (!name)
return -ENOMEM;
parent_name = __clk_get_name(ao_cec->oscin);
init.name = name;
init.ops = &meson_ao_cec_g12a_dualdiv_clk_ops;
init.flags = 0;
init.parent_names = &parent_name;
init.num_parents = 1;
dualdiv_clk->regmap = ao_cec->regmap;
dualdiv_clk->hw.init = &init;
clk = devm_clk_register(dev, &dualdiv_clk->hw);
kfree(name);
if (IS_ERR(clk)) {
dev_err(dev, "failed to register clock\n");
return PTR_ERR(clk);
}
ao_cec->core = clk;
return 0;
}
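/*
 * The CECB core registers are reached indirectly through CECB_RW_REG:
 * a read posts the register address, polls BUS_BUSY until the value is
 * available and then extracts RD_DATA; a write posts address, data and
 * WRITE_EN in a single access.
 */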
static int meson_ao_cec_g12a_read(void *context, unsigned int addr,
unsigned int *data)
{
struct meson_ao_cec_g12a_device *ao_cec = context;
u32 reg = FIELD_PREP(CECB_RW_ADDR, addr);
int ret = 0;
ret = regmap_write(ao_cec->regmap, CECB_RW_REG, reg);
if (ret)
return ret;
ret = regmap_read_poll_timeout(ao_cec->regmap, CECB_RW_REG, reg,
!(reg & CECB_RW_BUS_BUSY),
5, 1000);
if (ret)
return ret;
ret = regmap_read(ao_cec->regmap, CECB_RW_REG, &reg);
*data = FIELD_GET(CECB_RW_RD_DATA, reg);
return ret;
}
static int meson_ao_cec_g12a_write(void *context, unsigned int addr,
unsigned int data)
{
struct meson_ao_cec_g12a_device *ao_cec = context;
u32 reg = FIELD_PREP(CECB_RW_ADDR, addr) |
FIELD_PREP(CECB_RW_WR_DATA, data) |
CECB_RW_WRITE_EN;
return regmap_write(ao_cec->regmap, CECB_RW_REG, reg);
}
static const struct regmap_config meson_ao_cec_g12a_cec_regmap_conf = {
.reg_bits = 8,
.val_bits = 8,
.reg_read = meson_ao_cec_g12a_read,
.reg_write = meson_ao_cec_g12a_write,
.max_register = 0xffff,
};
static inline void
meson_ao_cec_g12a_irq_setup(struct meson_ao_cec_g12a_device *ao_cec,
bool enable)
{
u32 cfg = CECB_INTR_DONE | CECB_INTR_EOM | CECB_INTR_NACK |
CECB_INTR_ARB_LOSS | CECB_INTR_INITIATOR_ERR |
CECB_INTR_FOLLOWER_ERR;
regmap_write(ao_cec->regmap, CECB_INTR_MASKN_REG,
enable ? cfg : 0);
}
static void meson_ao_cec_g12a_irq_rx(struct meson_ao_cec_g12a_device *ao_cec)
{
int i, ret = 0;
u32 val;
ret = regmap_read(ao_cec->regmap_cec, CECB_RX_CNT, &val);
ao_cec->rx_msg.len = val;
if (ao_cec->rx_msg.len > CEC_MAX_MSG_SIZE)
ao_cec->rx_msg.len = CEC_MAX_MSG_SIZE;
for (i = 0; i < ao_cec->rx_msg.len; i++) {
ret |= regmap_read(ao_cec->regmap_cec,
CECB_RX_DATA00 + i, &val);
ao_cec->rx_msg.msg[i] = val & 0xff;
}
ret |= regmap_write(ao_cec->regmap_cec, CECB_LOCK_BUF, 0);
if (ret)
return;
cec_received_msg(ao_cec->adap, &ao_cec->rx_msg);
}
static irqreturn_t meson_ao_cec_g12a_irq(int irq, void *data)
{
struct meson_ao_cec_g12a_device *ao_cec = data;
u32 stat;
regmap_read(ao_cec->regmap, CECB_INTR_STAT_REG, &stat);
if (stat)
return IRQ_WAKE_THREAD;
return IRQ_NONE;
}
static irqreturn_t meson_ao_cec_g12a_irq_thread(int irq, void *data)
{
struct meson_ao_cec_g12a_device *ao_cec = data;
u32 stat;
regmap_read(ao_cec->regmap, CECB_INTR_STAT_REG, &stat);
regmap_write(ao_cec->regmap, CECB_INTR_CLR_REG, stat);
if (stat & CECB_INTR_DONE)
cec_transmit_attempt_done(ao_cec->adap, CEC_TX_STATUS_OK);
if (stat & CECB_INTR_EOM)
meson_ao_cec_g12a_irq_rx(ao_cec);
if (stat & CECB_INTR_NACK)
cec_transmit_attempt_done(ao_cec->adap, CEC_TX_STATUS_NACK);
if (stat & CECB_INTR_ARB_LOSS) {
regmap_write(ao_cec->regmap_cec, CECB_TX_CNT, 0);
regmap_update_bits(ao_cec->regmap_cec, CECB_CTRL,
CECB_CTRL_SEND | CECB_CTRL_TYPE, 0);
cec_transmit_attempt_done(ao_cec->adap, CEC_TX_STATUS_ARB_LOST);
}
/* Initiator reports an error on the CEC bus */
if (stat & CECB_INTR_INITIATOR_ERR)
cec_transmit_attempt_done(ao_cec->adap, CEC_TX_STATUS_ERROR);
/* Follower reports a receive error, just reset RX buffer */
if (stat & CECB_INTR_FOLLOWER_ERR)
regmap_write(ao_cec->regmap_cec, CECB_LOCK_BUF, 0);
return IRQ_HANDLED;
}
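/*
 * CECB_LADD_LOW and CECB_LADD_HIGH form a 16-bit bitmask of enabled
 * logical addresses: bit N of LADD_LOW enables address N (0-7), bit N
 * of LADD_HIGH enables address N + 8 (8-15).
 */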
static int
meson_ao_cec_g12a_set_log_addr(struct cec_adapter *adap, u8 logical_addr)
{
struct meson_ao_cec_g12a_device *ao_cec = adap->priv;
int ret = 0;
if (logical_addr == CEC_LOG_ADDR_INVALID) {
/* Assume this will always succeed */
regmap_write(ao_cec->regmap_cec, CECB_LADD_LOW, 0);
regmap_write(ao_cec->regmap_cec, CECB_LADD_HIGH, 0);
return 0;
} else if (logical_addr < 8) {
ret = regmap_update_bits(ao_cec->regmap_cec, CECB_LADD_LOW,
BIT(logical_addr),
BIT(logical_addr));
} else {
ret = regmap_update_bits(ao_cec->regmap_cec, CECB_LADD_HIGH,
BIT(logical_addr - 8),
BIT(logical_addr - 8));
}
/* Always set Broadcast/Unregistered 15 address */
ret |= regmap_update_bits(ao_cec->regmap_cec, CECB_LADD_HIGH,
BIT(CEC_LOG_ADDR_UNREGISTERED - 8),
BIT(CEC_LOG_ADDR_UNREGISTERED - 8));
return ret ? -EIO : 0;
}
static int meson_ao_cec_g12a_transmit(struct cec_adapter *adap, u8 attempts,
u32 signal_free_time, struct cec_msg *msg)
{
struct meson_ao_cec_g12a_device *ao_cec = adap->priv;
unsigned int type;
int ret = 0;
u32 val;
int i;
/* Check if RX is in progress */
ret = regmap_read(ao_cec->regmap_cec, CECB_LOCK_BUF, &val);
if (ret)
return ret;
if (val & CECB_LOCK_BUF_EN)
return -EBUSY;
/* Check if TX Busy */
ret = regmap_read(ao_cec->regmap_cec, CECB_CTRL, &val);
if (ret)
return ret;
if (val & CECB_CTRL_SEND)
return -EBUSY;
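/*
 * Translate the requested signal free time into the hardware frame
 * type (retry / new initiator / next transfer).
 */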
switch (signal_free_time) {
case CEC_SIGNAL_FREE_TIME_RETRY:
type = CECB_CTRL_TYPE_RETRY;
break;
case CEC_SIGNAL_FREE_TIME_NEXT_XFER:
type = CECB_CTRL_TYPE_NEXT;
break;
case CEC_SIGNAL_FREE_TIME_NEW_INITIATOR:
default:
type = CECB_CTRL_TYPE_NEW;
break;
}
for (i = 0; i < msg->len; i++)
ret |= regmap_write(ao_cec->regmap_cec, CECB_TX_DATA00 + i,
msg->msg[i]);
ret |= regmap_write(ao_cec->regmap_cec, CECB_TX_CNT, msg->len);
if (ret)
return -EIO;
ret = regmap_update_bits(ao_cec->regmap_cec, CECB_CTRL,
CECB_CTRL_SEND |
CECB_CTRL_TYPE,
CECB_CTRL_SEND |
FIELD_PREP(CECB_CTRL_TYPE, type));
return ret;
}
static int meson_ao_cec_g12a_adap_enable(struct cec_adapter *adap, bool enable)
{
struct meson_ao_cec_g12a_device *ao_cec = adap->priv;
meson_ao_cec_g12a_irq_setup(ao_cec, false);
regmap_update_bits(ao_cec->regmap, CECB_GEN_CNTL_REG,
CECB_GEN_CNTL_RESET, CECB_GEN_CNTL_RESET);
if (!enable)
return 0;
/* Setup Filter */
regmap_update_bits(ao_cec->regmap, CECB_GEN_CNTL_REG,
CECB_GEN_CNTL_FILTER_TICK_SEL |
CECB_GEN_CNTL_FILTER_DEL,
FIELD_PREP(CECB_GEN_CNTL_FILTER_TICK_SEL,
CECB_GEN_CNTL_FILTER_TICK_1US) |
FIELD_PREP(CECB_GEN_CNTL_FILTER_DEL, 7));
/* Enable System Clock */
regmap_update_bits(ao_cec->regmap, CECB_GEN_CNTL_REG,
CECB_GEN_CNTL_SYS_CLK_EN,
CECB_GEN_CNTL_SYS_CLK_EN);
/* Enable gated clock (Normal mode). */
regmap_update_bits(ao_cec->regmap, CECB_GEN_CNTL_REG,
CECB_GEN_CNTL_CLK_CTRL_MASK,
FIELD_PREP(CECB_GEN_CNTL_CLK_CTRL_MASK,
CECB_GEN_CNTL_CLK_ENABLE));
/* Release Reset */
regmap_update_bits(ao_cec->regmap, CECB_GEN_CNTL_REG,
CECB_GEN_CNTL_RESET, 0);
if (ao_cec->data->ctrl2_setup)
regmap_write(ao_cec->regmap_cec, CECB_CTRL2,
FIELD_PREP(CECB_CTRL2_RISE_DEL_MAX, 2));
meson_ao_cec_g12a_irq_setup(ao_cec, true);
return 0;
}
static const struct cec_adap_ops meson_ao_cec_g12a_ops = {
.adap_enable = meson_ao_cec_g12a_adap_enable,
.adap_log_addr = meson_ao_cec_g12a_set_log_addr,
.adap_transmit = meson_ao_cec_g12a_transmit,
};
static int meson_ao_cec_g12a_probe(struct platform_device *pdev)
{
struct meson_ao_cec_g12a_device *ao_cec;
struct device *hdmi_dev;
void __iomem *base;
int ret, irq;
hdmi_dev = cec_notifier_parse_hdmi_phandle(&pdev->dev);
if (IS_ERR(hdmi_dev))
return PTR_ERR(hdmi_dev);
ao_cec = devm_kzalloc(&pdev->dev, sizeof(*ao_cec), GFP_KERNEL);
if (!ao_cec)
return -ENOMEM;
ao_cec->data = of_device_get_match_data(&pdev->dev);
if (!ao_cec->data) {
dev_err(&pdev->dev, "failed to get match data\n");
return -ENODEV;
}
spin_lock_init(&ao_cec->cec_reg_lock);
ao_cec->pdev = pdev;
ao_cec->adap = cec_allocate_adapter(&meson_ao_cec_g12a_ops, ao_cec,
"meson_g12a_ao_cec",
CEC_CAP_DEFAULTS |
CEC_CAP_CONNECTOR_INFO,
CEC_MAX_LOG_ADDRS);
if (IS_ERR(ao_cec->adap))
return PTR_ERR(ao_cec->adap);
ao_cec->adap->owner = THIS_MODULE;
base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(base)) {
ret = PTR_ERR(base);
goto out_probe_adapter;
}
ao_cec->regmap = devm_regmap_init_mmio(&pdev->dev, base,
&meson_ao_cec_g12a_regmap_conf);
if (IS_ERR(ao_cec->regmap)) {
ret = PTR_ERR(ao_cec->regmap);
goto out_probe_adapter;
}
ao_cec->regmap_cec = devm_regmap_init(&pdev->dev, NULL, ao_cec,
&meson_ao_cec_g12a_cec_regmap_conf);
if (IS_ERR(ao_cec->regmap_cec)) {
ret = PTR_ERR(ao_cec->regmap_cec);
goto out_probe_adapter;
}
irq = platform_get_irq(pdev, 0);
ret = devm_request_threaded_irq(&pdev->dev, irq,
meson_ao_cec_g12a_irq,
meson_ao_cec_g12a_irq_thread,
0, NULL, ao_cec);
if (ret) {
dev_err(&pdev->dev, "irq request failed\n");
goto out_probe_adapter;
}
ao_cec->oscin = devm_clk_get(&pdev->dev, "oscin");
if (IS_ERR(ao_cec->oscin)) {
dev_err(&pdev->dev, "oscin clock request failed\n");
ret = PTR_ERR(ao_cec->oscin);
goto out_probe_adapter;
}
ret = meson_ao_cec_g12a_setup_clk(ao_cec);
if (ret)
goto out_probe_adapter;
ret = clk_prepare_enable(ao_cec->core);
if (ret) {
dev_err(&pdev->dev, "core clock enable failed\n");
goto out_probe_adapter;
}
device_reset_optional(&pdev->dev);
platform_set_drvdata(pdev, ao_cec);
ao_cec->notify = cec_notifier_cec_adap_register(hdmi_dev, NULL,
ao_cec->adap);
if (!ao_cec->notify) {
ret = -ENOMEM;
goto out_probe_core_clk;
}
ret = cec_register_adapter(ao_cec->adap, &pdev->dev);
if (ret < 0)
goto out_probe_notify;
/* Setup Hardware */
regmap_write(ao_cec->regmap, CECB_GEN_CNTL_REG, CECB_GEN_CNTL_RESET);
return 0;
out_probe_notify:
cec_notifier_cec_adap_unregister(ao_cec->notify, ao_cec->adap);
out_probe_core_clk:
clk_disable_unprepare(ao_cec->core);
out_probe_adapter:
cec_delete_adapter(ao_cec->adap);
dev_err(&pdev->dev, "CEC controller registration failed\n");
return ret;
}
static void meson_ao_cec_g12a_remove(struct platform_device *pdev)
{
struct meson_ao_cec_g12a_device *ao_cec = platform_get_drvdata(pdev);
clk_disable_unprepare(ao_cec->core);
cec_notifier_cec_adap_unregister(ao_cec->notify, ao_cec->adap);
cec_unregister_adapter(ao_cec->adap);
}
static const struct meson_ao_cec_g12a_data ao_cec_g12a_data = {
.ctrl2_setup = false,
};
static const struct meson_ao_cec_g12a_data ao_cec_sm1_data = {
.ctrl2_setup = true,
};
static const struct of_device_id meson_ao_cec_g12a_of_match[] = {
{
.compatible = "amlogic,meson-g12a-ao-cec",
.data = &ao_cec_g12a_data,
},
{
.compatible = "amlogic,meson-sm1-ao-cec",
.data = &ao_cec_sm1_data,
},
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, meson_ao_cec_g12a_of_match);
static struct platform_driver meson_ao_cec_g12a_driver = {
.probe = meson_ao_cec_g12a_probe,
.remove_new = meson_ao_cec_g12a_remove,
.driver = {
.name = "meson-ao-cec-g12a",
.of_match_table = of_match_ptr(meson_ao_cec_g12a_of_match),
},
};
module_platform_driver(meson_ao_cec_g12a_driver);
MODULE_DESCRIPTION("Meson AO CEC G12A Controller driver");
MODULE_AUTHOR("Neil Armstrong <[email protected]>");
MODULE_LICENSE("GPL");
| linux-master | drivers/media/cec/platform/meson/ao-cec-g12a.c |
// SPDX-License-Identifier: GPL-2.0
/*
* STM32 CEC driver
* Copyright (C) STMicroelectronics SA 2017
*
*/
#include <linux/clk.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <media/cec.h>
#define CEC_NAME "stm32-cec"
/* CEC registers */
#define CEC_CR 0x0000 /* Control Register */
#define CEC_CFGR 0x0004 /* ConFiGuration Register */
#define CEC_TXDR 0x0008 /* Tx data Register */
#define CEC_RXDR 0x000C /* Rx data Register */
#define CEC_ISR 0x0010 /* Interrupt and status Register */
#define CEC_IER 0x0014 /* Interrupt enable Register */
#define TXEOM BIT(2)
#define TXSOM BIT(1)
#define CECEN BIT(0)
#define LSTN BIT(31)
#define OAR GENMASK(30, 16)
#define SFTOP BIT(8)
#define BRDNOGEN BIT(7)
#define LBPEGEN BIT(6)
#define BREGEN BIT(5)
#define BRESTP BIT(4)
#define RXTOL BIT(3)
#define SFT GENMASK(2, 0)
#define FULL_CFG (LSTN | SFTOP | BRDNOGEN | LBPEGEN | BREGEN | BRESTP \
| RXTOL)
#define TXACKE BIT(12)
#define TXERR BIT(11)
#define TXUDR BIT(10)
#define TXEND BIT(9)
#define TXBR BIT(8)
#define ARBLST BIT(7)
#define RXACKE BIT(6)
#define RXOVR BIT(2)
#define RXEND BIT(1)
#define RXBR BIT(0)
#define ALL_TX_IT (TXEND | TXBR | TXACKE | TXERR | TXUDR | ARBLST)
#define ALL_RX_IT (RXEND | RXBR | RXACKE | RXOVR)
/*
* 400 ms is the time it takes for one 16 byte message to be
* transferred and 5 is the maximum number of retries. Add
* another 100 ms as a margin.
*/
#define CEC_XFER_TIMEOUT_MS (5 * 400 + 100)
struct stm32_cec {
struct cec_adapter *adap;
struct device *dev;
struct clk *clk_cec;
struct clk *clk_hdmi_cec;
struct reset_control *rstc;
struct regmap *regmap;
int irq;
u32 irq_status;
struct cec_msg rx_msg;
struct cec_msg tx_msg;
int tx_cnt;
};
static void cec_hw_init(struct stm32_cec *cec)
{
regmap_update_bits(cec->regmap, CEC_CR, TXEOM | TXSOM | CECEN, 0);
regmap_update_bits(cec->regmap, CEC_IER, ALL_TX_IT | ALL_RX_IT,
ALL_TX_IT | ALL_RX_IT);
regmap_update_bits(cec->regmap, CEC_CFGR, FULL_CFG, FULL_CFG);
}
static void stm32_tx_done(struct stm32_cec *cec, u32 status)
{
if (status & (TXERR | TXUDR)) {
cec_transmit_done(cec->adap, CEC_TX_STATUS_ERROR,
0, 0, 0, 1);
return;
}
if (status & ARBLST) {
cec_transmit_done(cec->adap, CEC_TX_STATUS_ARB_LOST,
1, 0, 0, 0);
return;
}
if (status & TXACKE) {
cec_transmit_done(cec->adap, CEC_TX_STATUS_NACK,
0, 1, 0, 0);
return;
}
if (cec->irq_status & TXBR) {
/* send next byte */
if (cec->tx_cnt < cec->tx_msg.len)
regmap_write(cec->regmap, CEC_TXDR,
cec->tx_msg.msg[cec->tx_cnt++]);
/* TXEOM is set to command transmission of the last byte */
if (cec->tx_cnt == cec->tx_msg.len)
regmap_update_bits(cec->regmap, CEC_CR, TXEOM, TXEOM);
}
if (cec->irq_status & TXEND)
cec_transmit_done(cec->adap, CEC_TX_STATUS_OK, 0, 0, 0, 0);
}
static void stm32_rx_done(struct stm32_cec *cec, u32 status)
{
if (cec->irq_status & (RXACKE | RXOVR)) {
cec->rx_msg.len = 0;
return;
}
if (cec->irq_status & RXBR) {
u32 val;
regmap_read(cec->regmap, CEC_RXDR, &val);
cec->rx_msg.msg[cec->rx_msg.len++] = val & 0xFF;
}
if (cec->irq_status & RXEND) {
cec_received_msg(cec->adap, &cec->rx_msg);
cec->rx_msg.len = 0;
}
}
static irqreturn_t stm32_cec_irq_thread(int irq, void *arg)
{
struct stm32_cec *cec = arg;
if (cec->irq_status & ALL_TX_IT)
stm32_tx_done(cec, cec->irq_status);
if (cec->irq_status & ALL_RX_IT)
stm32_rx_done(cec, cec->irq_status);
cec->irq_status = 0;
return IRQ_HANDLED;
}
static irqreturn_t stm32_cec_irq_handler(int irq, void *arg)
{
struct stm32_cec *cec = arg;
regmap_read(cec->regmap, CEC_ISR, &cec->irq_status);
regmap_update_bits(cec->regmap, CEC_ISR,
ALL_TX_IT | ALL_RX_IT,
ALL_TX_IT | ALL_RX_IT);
return IRQ_WAKE_THREAD;
}
static int stm32_cec_adap_enable(struct cec_adapter *adap, bool enable)
{
struct stm32_cec *cec = adap->priv;
int ret = 0;
if (enable) {
ret = clk_enable(cec->clk_cec);
if (ret)
dev_err(cec->dev, "fail to enable cec clock\n");
clk_enable(cec->clk_hdmi_cec);
regmap_update_bits(cec->regmap, CEC_CR, CECEN, CECEN);
} else {
clk_disable(cec->clk_cec);
clk_disable(cec->clk_hdmi_cec);
regmap_update_bits(cec->regmap, CEC_CR, CECEN, 0);
}
return ret;
}
static int stm32_cec_adap_log_addr(struct cec_adapter *adap, u8 logical_addr)
{
struct stm32_cec *cec = adap->priv;
u32 oar = (1 << logical_addr) << 16;
u32 val;
/* Poll the CEC_CR register every 100µs, waiting for the end of the transmission */
regmap_read_poll_timeout(cec->regmap, CEC_CR, val, !(val & TXSOM),
100, CEC_XFER_TIMEOUT_MS * 1000);
regmap_update_bits(cec->regmap, CEC_CR, CECEN, 0);
if (logical_addr == CEC_LOG_ADDR_INVALID)
regmap_update_bits(cec->regmap, CEC_CFGR, OAR, 0);
else
regmap_update_bits(cec->regmap, CEC_CFGR, oar, oar);
regmap_update_bits(cec->regmap, CEC_CR, CECEN, CECEN);
return 0;
}
static int stm32_cec_adap_transmit(struct cec_adapter *adap, u8 attempts,
u32 signal_free_time, struct cec_msg *msg)
{
struct stm32_cec *cec = adap->priv;
/* Copy message */
cec->tx_msg = *msg;
cec->tx_cnt = 0;
/*
* If the CEC message consists of only one byte,
* TXEOM must be set before TXSOM.
*/
if (cec->tx_msg.len == 1)
regmap_update_bits(cec->regmap, CEC_CR, TXEOM, TXEOM);
/* TXSOM is set to command transmission of the first byte */
regmap_update_bits(cec->regmap, CEC_CR, TXSOM, TXSOM);
/* Write the header (first byte of message) */
regmap_write(cec->regmap, CEC_TXDR, cec->tx_msg.msg[0]);
cec->tx_cnt++;
return 0;
}
static const struct cec_adap_ops stm32_cec_adap_ops = {
.adap_enable = stm32_cec_adap_enable,
.adap_log_addr = stm32_cec_adap_log_addr,
.adap_transmit = stm32_cec_adap_transmit,
};
static const struct regmap_config stm32_cec_regmap_cfg = {
.reg_bits = 32,
.val_bits = 32,
.reg_stride = sizeof(u32),
.max_register = 0x14,
.fast_io = true,
};
static int stm32_cec_probe(struct platform_device *pdev)
{
u32 caps = CEC_CAP_DEFAULTS | CEC_CAP_PHYS_ADDR | CEC_MODE_MONITOR_ALL;
struct stm32_cec *cec;
void __iomem *mmio;
int ret;
cec = devm_kzalloc(&pdev->dev, sizeof(*cec), GFP_KERNEL);
if (!cec)
return -ENOMEM;
cec->dev = &pdev->dev;
mmio = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(mmio))
return PTR_ERR(mmio);
cec->regmap = devm_regmap_init_mmio_clk(&pdev->dev, "cec", mmio,
&stm32_cec_regmap_cfg);
if (IS_ERR(cec->regmap))
return PTR_ERR(cec->regmap);
cec->irq = platform_get_irq(pdev, 0);
if (cec->irq < 0)
return cec->irq;
ret = devm_request_threaded_irq(&pdev->dev, cec->irq,
stm32_cec_irq_handler,
stm32_cec_irq_thread,
0,
pdev->name, cec);
if (ret)
return ret;
cec->clk_cec = devm_clk_get(&pdev->dev, "cec");
if (IS_ERR(cec->clk_cec))
return dev_err_probe(&pdev->dev, PTR_ERR(cec->clk_cec),
"Cannot get cec clock\n");
ret = clk_prepare(cec->clk_cec);
if (ret) {
dev_err(&pdev->dev, "Unable to prepare cec clock\n");
return ret;
}
cec->clk_hdmi_cec = devm_clk_get(&pdev->dev, "hdmi-cec");
if (IS_ERR(cec->clk_hdmi_cec) &&
PTR_ERR(cec->clk_hdmi_cec) == -EPROBE_DEFER) {
ret = -EPROBE_DEFER;
goto err_unprepare_cec_clk;
}
if (!IS_ERR(cec->clk_hdmi_cec)) {
ret = clk_prepare(cec->clk_hdmi_cec);
if (ret) {
dev_err(&pdev->dev, "Can't prepare hdmi-cec clock\n");
goto err_unprepare_cec_clk;
}
}
/*
* CEC_CAP_PHYS_ADDR caps should be removed when a cec notifier is
* available for example when a drm driver can provide edid
*/
cec->adap = cec_allocate_adapter(&stm32_cec_adap_ops, cec,
CEC_NAME, caps, CEC_MAX_LOG_ADDRS);
ret = PTR_ERR_OR_ZERO(cec->adap);
if (ret)
goto err_unprepare_hdmi_cec_clk;
ret = cec_register_adapter(cec->adap, &pdev->dev);
if (ret)
goto err_delete_adapter;
cec_hw_init(cec);
platform_set_drvdata(pdev, cec);
return 0;
err_delete_adapter:
cec_delete_adapter(cec->adap);
err_unprepare_hdmi_cec_clk:
clk_unprepare(cec->clk_hdmi_cec);
err_unprepare_cec_clk:
clk_unprepare(cec->clk_cec);
return ret;
}
static void stm32_cec_remove(struct platform_device *pdev)
{
struct stm32_cec *cec = platform_get_drvdata(pdev);
clk_unprepare(cec->clk_cec);
clk_unprepare(cec->clk_hdmi_cec);
cec_unregister_adapter(cec->adap);
}
static const struct of_device_id stm32_cec_of_match[] = {
{ .compatible = "st,stm32-cec" },
{ /* end node */ }
};
MODULE_DEVICE_TABLE(of, stm32_cec_of_match);
static struct platform_driver stm32_cec_driver = {
.probe = stm32_cec_probe,
.remove_new = stm32_cec_remove,
.driver = {
.name = CEC_NAME,
.of_match_table = stm32_cec_of_match,
},
};
module_platform_driver(stm32_cec_driver);
MODULE_AUTHOR("Benjamin Gaignard <[email protected]>");
MODULE_AUTHOR("Yannick Fertre <[email protected]>");
MODULE_DESCRIPTION("STMicroelectronics STM32 Consumer Electronics Control");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/media/cec/platform/stm32/stm32-cec.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright 2017 Cisco Systems, Inc. and/or its affiliates. All rights reserved.
*/
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/platform_device.h>
#include <linux/gpio/consumer.h>
#include <media/cec-notifier.h>
#include <media/cec-pin.h>
struct cec_gpio {
struct cec_adapter *adap;
struct cec_notifier *notifier;
struct device *dev;
struct gpio_desc *cec_gpio;
int cec_irq;
bool cec_is_low;
struct gpio_desc *hpd_gpio;
int hpd_irq;
bool hpd_is_high;
ktime_t hpd_ts;
struct gpio_desc *v5_gpio;
int v5_irq;
bool v5_is_high;
ktime_t v5_ts;
};
static int cec_gpio_read(struct cec_adapter *adap)
{
struct cec_gpio *cec = cec_get_drvdata(adap);
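/* While actively driving the line low, report 0 without reading the pin */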
if (cec->cec_is_low)
return 0;
return gpiod_get_value(cec->cec_gpio);
}
static void cec_gpio_high(struct cec_adapter *adap)
{
struct cec_gpio *cec = cec_get_drvdata(adap);
if (!cec->cec_is_low)
return;
cec->cec_is_low = false;
gpiod_set_value(cec->cec_gpio, 1);
}
static void cec_gpio_low(struct cec_adapter *adap)
{
struct cec_gpio *cec = cec_get_drvdata(adap);
if (cec->cec_is_low)
return;
cec->cec_is_low = true;
gpiod_set_value(cec->cec_gpio, 0);
}
static irqreturn_t cec_hpd_gpio_irq_handler_thread(int irq, void *priv)
{
struct cec_gpio *cec = priv;
cec_queue_pin_hpd_event(cec->adap, cec->hpd_is_high, cec->hpd_ts);
return IRQ_HANDLED;
}
static irqreturn_t cec_5v_gpio_irq_handler(int irq, void *priv)
{
struct cec_gpio *cec = priv;
int val = gpiod_get_value(cec->v5_gpio);
bool is_high = val > 0;
if (val < 0 || is_high == cec->v5_is_high)
return IRQ_HANDLED;
cec->v5_ts = ktime_get();
cec->v5_is_high = is_high;
return IRQ_WAKE_THREAD;
}
static irqreturn_t cec_5v_gpio_irq_handler_thread(int irq, void *priv)
{
struct cec_gpio *cec = priv;
cec_queue_pin_5v_event(cec->adap, cec->v5_is_high, cec->v5_ts);
return IRQ_HANDLED;
}
static irqreturn_t cec_hpd_gpio_irq_handler(int irq, void *priv)
{
struct cec_gpio *cec = priv;
int val = gpiod_get_value(cec->hpd_gpio);
bool is_high = val > 0;
if (val < 0 || is_high == cec->hpd_is_high)
return IRQ_HANDLED;
cec->hpd_ts = ktime_get();
cec->hpd_is_high = is_high;
return IRQ_WAKE_THREAD;
}
static irqreturn_t cec_gpio_irq_handler(int irq, void *priv)
{
struct cec_gpio *cec = priv;
int val = gpiod_get_value(cec->cec_gpio);
if (val >= 0)
cec_pin_changed(cec->adap, val > 0);
return IRQ_HANDLED;
}
static bool cec_gpio_enable_irq(struct cec_adapter *adap)
{
struct cec_gpio *cec = cec_get_drvdata(adap);
enable_irq(cec->cec_irq);
return true;
}
static void cec_gpio_disable_irq(struct cec_adapter *adap)
{
struct cec_gpio *cec = cec_get_drvdata(adap);
disable_irq(cec->cec_irq);
}
static void cec_gpio_status(struct cec_adapter *adap, struct seq_file *file)
{
struct cec_gpio *cec = cec_get_drvdata(adap);
seq_printf(file, "mode: %s\n", cec->cec_is_low ? "low-drive" : "read");
seq_printf(file, "using irq: %d\n", cec->cec_irq);
if (cec->hpd_gpio)
seq_printf(file, "hpd: %s\n",
cec->hpd_is_high ? "high" : "low");
if (cec->v5_gpio)
seq_printf(file, "5V: %s\n",
cec->v5_is_high ? "high" : "low");
}
static int cec_gpio_read_hpd(struct cec_adapter *adap)
{
struct cec_gpio *cec = cec_get_drvdata(adap);
if (!cec->hpd_gpio)
return -ENOTTY;
return gpiod_get_value(cec->hpd_gpio);
}
static int cec_gpio_read_5v(struct cec_adapter *adap)
{
struct cec_gpio *cec = cec_get_drvdata(adap);
if (!cec->v5_gpio)
return -ENOTTY;
return gpiod_get_value(cec->v5_gpio);
}
static const struct cec_pin_ops cec_gpio_pin_ops = {
.read = cec_gpio_read,
.low = cec_gpio_low,
.high = cec_gpio_high,
.enable_irq = cec_gpio_enable_irq,
.disable_irq = cec_gpio_disable_irq,
.status = cec_gpio_status,
.read_hpd = cec_gpio_read_hpd,
.read_5v = cec_gpio_read_5v,
};
static int cec_gpio_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct device *hdmi_dev;
struct cec_gpio *cec;
u32 caps = CEC_CAP_DEFAULTS | CEC_CAP_MONITOR_ALL | CEC_CAP_MONITOR_PIN;
int ret;
hdmi_dev = cec_notifier_parse_hdmi_phandle(dev);
if (PTR_ERR(hdmi_dev) == -EPROBE_DEFER)
return PTR_ERR(hdmi_dev);
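/*
 * Without an HDMI device to derive the physical address from, expose
 * CEC_CAP_PHYS_ADDR so userspace can configure it explicitly.
 */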
if (IS_ERR(hdmi_dev))
caps |= CEC_CAP_PHYS_ADDR;
cec = devm_kzalloc(dev, sizeof(*cec), GFP_KERNEL);
if (!cec)
return -ENOMEM;
cec->dev = dev;
cec->cec_gpio = devm_gpiod_get(dev, "cec", GPIOD_OUT_HIGH_OPEN_DRAIN);
if (IS_ERR(cec->cec_gpio))
return PTR_ERR(cec->cec_gpio);
cec->cec_irq = gpiod_to_irq(cec->cec_gpio);
cec->hpd_gpio = devm_gpiod_get_optional(dev, "hpd", GPIOD_IN);
if (IS_ERR(cec->hpd_gpio))
return PTR_ERR(cec->hpd_gpio);
cec->v5_gpio = devm_gpiod_get_optional(dev, "v5", GPIOD_IN);
if (IS_ERR(cec->v5_gpio))
return PTR_ERR(cec->v5_gpio);
cec->adap = cec_pin_allocate_adapter(&cec_gpio_pin_ops,
cec, pdev->name, caps);
if (IS_ERR(cec->adap))
return PTR_ERR(cec->adap);
ret = devm_request_irq(dev, cec->cec_irq, cec_gpio_irq_handler,
IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING | IRQF_NO_AUTOEN,
cec->adap->name, cec);
if (ret)
goto del_adap;
if (cec->hpd_gpio) {
cec->hpd_irq = gpiod_to_irq(cec->hpd_gpio);
ret = devm_request_threaded_irq(dev, cec->hpd_irq,
cec_hpd_gpio_irq_handler,
cec_hpd_gpio_irq_handler_thread,
IRQF_ONESHOT |
IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING,
"hpd-gpio", cec);
if (ret)
goto del_adap;
}
if (cec->v5_gpio) {
cec->v5_irq = gpiod_to_irq(cec->v5_gpio);
ret = devm_request_threaded_irq(dev, cec->v5_irq,
cec_5v_gpio_irq_handler,
cec_5v_gpio_irq_handler_thread,
IRQF_ONESHOT |
IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING,
"v5-gpio", cec);
if (ret)
goto del_adap;
}
if (!IS_ERR(hdmi_dev)) {
cec->notifier = cec_notifier_cec_adap_register(hdmi_dev, NULL,
cec->adap);
if (!cec->notifier) {
ret = -ENOMEM;
goto del_adap;
}
}
ret = cec_register_adapter(cec->adap, &pdev->dev);
if (ret)
goto unreg_notifier;
platform_set_drvdata(pdev, cec);
return 0;
unreg_notifier:
cec_notifier_cec_adap_unregister(cec->notifier, cec->adap);
del_adap:
cec_delete_adapter(cec->adap);
return ret;
}
static void cec_gpio_remove(struct platform_device *pdev)
{
struct cec_gpio *cec = platform_get_drvdata(pdev);
cec_notifier_cec_adap_unregister(cec->notifier, cec->adap);
cec_unregister_adapter(cec->adap);
}
static const struct of_device_id cec_gpio_match[] = {
{
.compatible = "cec-gpio",
},
{},
};
MODULE_DEVICE_TABLE(of, cec_gpio_match);
static struct platform_driver cec_gpio_pdrv = {
.probe = cec_gpio_probe,
.remove_new = cec_gpio_remove,
.driver = {
.name = "cec-gpio",
.of_match_table = cec_gpio_match,
},
};
module_platform_driver(cec_gpio_pdrv);
MODULE_AUTHOR("Hans Verkuil <[email protected]>");
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("CEC GPIO driver");
| linux-master | drivers/media/cec/platform/cec-gpio/cec-gpio.c |
// SPDX-License-Identifier: GPL-2.0
/*
* STIH4xx CEC driver
* Copyright (C) STMicroelectronics SA 2016
*
*/
#include <linux/clk.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <media/cec.h>
#include <media/cec-notifier.h>
#define CEC_NAME "stih-cec"
/* CEC registers */
#define CEC_CLK_DIV 0x0
#define CEC_CTRL 0x4
#define CEC_IRQ_CTRL 0x8
#define CEC_STATUS 0xC
#define CEC_EXT_STATUS 0x10
#define CEC_TX_CTRL 0x14
#define CEC_FREE_TIME_THRESH 0x18
#define CEC_BIT_TOUT_THRESH 0x1C
#define CEC_BIT_PULSE_THRESH 0x20
#define CEC_DATA 0x24
#define CEC_TX_ARRAY_CTRL 0x28
#define CEC_CTRL2 0x2C
#define CEC_TX_ERROR_STS 0x30
#define CEC_ADDR_TABLE 0x34
#define CEC_DATA_ARRAY_CTRL 0x38
#define CEC_DATA_ARRAY_STATUS 0x3C
#define CEC_TX_DATA_BASE 0x40
#define CEC_TX_DATA_TOP 0x50
#define CEC_TX_DATA_SIZE 0x1
#define CEC_RX_DATA_BASE 0x54
#define CEC_RX_DATA_TOP 0x64
#define CEC_RX_DATA_SIZE 0x1
/* CEC_CTRL2 */
#define CEC_LINE_INACTIVE_EN BIT(0)
#define CEC_AUTO_BUS_ERR_EN BIT(1)
#define CEC_STOP_ON_ARB_ERR_EN BIT(2)
#define CEC_TX_REQ_WAIT_EN BIT(3)
/* CEC_DATA_ARRAY_CTRL */
#define CEC_TX_ARRAY_EN BIT(0)
#define CEC_RX_ARRAY_EN BIT(1)
#define CEC_TX_ARRAY_RESET BIT(2)
#define CEC_RX_ARRAY_RESET BIT(3)
#define CEC_TX_N_OF_BYTES_IRQ_EN BIT(4)
#define CEC_TX_STOP_ON_NACK BIT(7)
/* CEC_TX_ARRAY_CTRL */
#define CEC_TX_N_OF_BYTES 0x1F
#define CEC_TX_START BIT(5)
#define CEC_TX_AUTO_SOM_EN BIT(6)
#define CEC_TX_AUTO_EOM_EN BIT(7)
/* CEC_IRQ_CTRL */
#define CEC_TX_DONE_IRQ_EN BIT(0)
#define CEC_ERROR_IRQ_EN BIT(2)
#define CEC_RX_DONE_IRQ_EN BIT(3)
#define CEC_RX_SOM_IRQ_EN BIT(4)
#define CEC_RX_EOM_IRQ_EN BIT(5)
#define CEC_FREE_TIME_IRQ_EN BIT(6)
#define CEC_PIN_STS_IRQ_EN BIT(7)
/* CEC_CTRL */
#define CEC_IN_FILTER_EN BIT(0)
#define CEC_PWR_SAVE_EN BIT(1)
#define CEC_EN BIT(4)
#define CEC_ACK_CTRL BIT(5)
#define CEC_RX_RESET_EN BIT(6)
#define CEC_IGNORE_RX_ERROR BIT(7)
/* CEC_STATUS */
#define CEC_TX_DONE_STS BIT(0)
#define CEC_TX_ACK_GET_STS BIT(1)
#define CEC_ERROR_STS BIT(2)
#define CEC_RX_DONE_STS BIT(3)
#define CEC_RX_SOM_STS BIT(4)
#define CEC_RX_EOM_STS BIT(5)
#define CEC_FREE_TIME_IRQ_STS BIT(6)
#define CEC_PIN_STS BIT(7)
#define CEC_SBIT_TOUT_STS BIT(8)
#define CEC_DBIT_TOUT_STS BIT(9)
#define CEC_LPULSE_ERROR_STS BIT(10)
#define CEC_HPULSE_ERROR_STS BIT(11)
#define CEC_TX_ERROR BIT(12)
#define CEC_TX_ARB_ERROR BIT(13)
#define CEC_RX_ERROR_MIN BIT(14)
#define CEC_RX_ERROR_MAX BIT(15)
/* Signal free time in bit periods (2.4ms) */
#define CEC_PRESENT_INIT_SFT 7
#define CEC_NEW_INIT_SFT 5
#define CEC_RETRANSMIT_SFT 3
/* Constants for CEC_BIT_TOUT_THRESH register */
#define CEC_SBIT_TOUT_47MS BIT(1)
#define CEC_SBIT_TOUT_48MS (BIT(0) | BIT(1))
#define CEC_SBIT_TOUT_50MS BIT(2)
#define CEC_DBIT_TOUT_27MS BIT(0)
#define CEC_DBIT_TOUT_28MS BIT(1)
#define CEC_DBIT_TOUT_29MS (BIT(0) | BIT(1))
/* Constants for CEC_BIT_PULSE_THRESH register */
#define CEC_BIT_LPULSE_03MS BIT(1)
#define CEC_BIT_HPULSE_03MS BIT(3)
/* Constants for CEC_DATA_ARRAY_STATUS register */
#define CEC_RX_N_OF_BYTES 0x1F
#define CEC_TX_N_OF_BYTES_SENT BIT(5)
#define CEC_RX_OVERRUN BIT(6)
struct stih_cec {
struct cec_adapter *adap;
struct device *dev;
struct clk *clk;
void __iomem *regs;
int irq;
u32 irq_status;
struct cec_notifier *notifier;
};
static int stih_cec_adap_enable(struct cec_adapter *adap, bool enable)
{
struct stih_cec *cec = cec_get_drvdata(adap);
if (enable) {
/* The doc says (input TCLK_PERIOD * CEC_CLK_DIV) = 0.1ms */
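/* i.e. CEC_CLK_DIV is the number of input clock cycles per 0.1ms tick */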
unsigned long clk_freq = clk_get_rate(cec->clk);
u32 cec_clk_div = clk_freq / 10000;
writel(cec_clk_div, cec->regs + CEC_CLK_DIV);
/* Configuration of the durations activating a timeout */
writel(CEC_SBIT_TOUT_47MS | (CEC_DBIT_TOUT_28MS << 4),
cec->regs + CEC_BIT_TOUT_THRESH);
/* Configuration of the smallest allowed duration for pulses */
writel(CEC_BIT_LPULSE_03MS | CEC_BIT_HPULSE_03MS,
cec->regs + CEC_BIT_PULSE_THRESH);
/* Minimum received bit period threshold */
writel(BIT(5) | BIT(7), cec->regs + CEC_TX_CTRL);
/* Configuration of transceiver data arrays */
writel(CEC_TX_ARRAY_EN | CEC_RX_ARRAY_EN | CEC_TX_STOP_ON_NACK,
cec->regs + CEC_DATA_ARRAY_CTRL);
/* Configuration of the control bits for CEC Transceiver */
writel(CEC_IN_FILTER_EN | CEC_EN | CEC_RX_RESET_EN,
cec->regs + CEC_CTRL);
/* Clear logical addresses */
writel(0, cec->regs + CEC_ADDR_TABLE);
/* Clear the status register */
writel(0x0, cec->regs + CEC_STATUS);
/* Enable the interrupts */
writel(CEC_TX_DONE_IRQ_EN | CEC_RX_DONE_IRQ_EN |
CEC_RX_SOM_IRQ_EN | CEC_RX_EOM_IRQ_EN |
CEC_ERROR_IRQ_EN,
cec->regs + CEC_IRQ_CTRL);
} else {
/* Clear logical addresses */
writel(0, cec->regs + CEC_ADDR_TABLE);
/* Clear the status register */
writel(0x0, cec->regs + CEC_STATUS);
/* Disable the interrupts */
writel(0, cec->regs + CEC_IRQ_CTRL);
}
return 0;
}
static int stih_cec_adap_log_addr(struct cec_adapter *adap, u8 logical_addr)
{
struct stih_cec *cec = cec_get_drvdata(adap);
u32 reg = readl(cec->regs + CEC_ADDR_TABLE);
reg |= 1 << logical_addr;
if (logical_addr == CEC_LOG_ADDR_INVALID)
reg = 0;
writel(reg, cec->regs + CEC_ADDR_TABLE);
return 0;
}
static int stih_cec_adap_transmit(struct cec_adapter *adap, u8 attempts,
u32 signal_free_time, struct cec_msg *msg)
{
struct stih_cec *cec = cec_get_drvdata(adap);
int i;
/* Copy message into registers */
for (i = 0; i < msg->len; i++)
writeb(msg->msg[i], cec->regs + CEC_TX_DATA_BASE + i);
/*
* Start transmission, configure hardware to add start and stop bits
* Signal free time is handled by the hardware
*/
writel(CEC_TX_AUTO_SOM_EN | CEC_TX_AUTO_EOM_EN | CEC_TX_START |
msg->len, cec->regs + CEC_TX_ARRAY_CTRL);
return 0;
}
static void stih_tx_done(struct stih_cec *cec, u32 status)
{
if (status & CEC_TX_ERROR) {
cec_transmit_attempt_done(cec->adap, CEC_TX_STATUS_ERROR);
return;
}
if (status & CEC_TX_ARB_ERROR) {
cec_transmit_attempt_done(cec->adap, CEC_TX_STATUS_ARB_LOST);
return;
}
if (!(status & CEC_TX_ACK_GET_STS)) {
cec_transmit_attempt_done(cec->adap, CEC_TX_STATUS_NACK);
return;
}
cec_transmit_attempt_done(cec->adap, CEC_TX_STATUS_OK);
}
static void stih_rx_done(struct stih_cec *cec, u32 status)
{
struct cec_msg msg = {};
u8 i;
if (status & CEC_RX_ERROR_MIN)
return;
if (status & CEC_RX_ERROR_MAX)
return;
msg.len = readl(cec->regs + CEC_DATA_ARRAY_STATUS) & 0x1f;
if (!msg.len)
return;
if (msg.len > CEC_MAX_MSG_SIZE)
msg.len = CEC_MAX_MSG_SIZE;
for (i = 0; i < msg.len; i++)
msg.msg[i] = readl(cec->regs + CEC_RX_DATA_BASE + i);
cec_received_msg(cec->adap, &msg);
}
static irqreturn_t stih_cec_irq_handler_thread(int irq, void *priv)
{
struct stih_cec *cec = priv;
if (cec->irq_status & CEC_TX_DONE_STS)
stih_tx_done(cec, cec->irq_status);
if (cec->irq_status & CEC_RX_DONE_STS)
stih_rx_done(cec, cec->irq_status);
cec->irq_status = 0;
return IRQ_HANDLED;
}
static irqreturn_t stih_cec_irq_handler(int irq, void *priv)
{
struct stih_cec *cec = priv;
cec->irq_status = readl(cec->regs + CEC_STATUS);
writel(cec->irq_status, cec->regs + CEC_STATUS);
return IRQ_WAKE_THREAD;
}
static const struct cec_adap_ops sti_cec_adap_ops = {
.adap_enable = stih_cec_adap_enable,
.adap_log_addr = stih_cec_adap_log_addr,
.adap_transmit = stih_cec_adap_transmit,
};
static int stih_cec_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct stih_cec *cec;
struct device *hdmi_dev;
int ret;
hdmi_dev = cec_notifier_parse_hdmi_phandle(dev);
if (IS_ERR(hdmi_dev))
return PTR_ERR(hdmi_dev);
cec = devm_kzalloc(dev, sizeof(*cec), GFP_KERNEL);
if (!cec)
return -ENOMEM;
cec->dev = dev;
cec->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(cec->regs))
return PTR_ERR(cec->regs);
cec->irq = platform_get_irq(pdev, 0);
if (cec->irq < 0)
return cec->irq;
ret = devm_request_threaded_irq(dev, cec->irq, stih_cec_irq_handler,
stih_cec_irq_handler_thread, 0,
pdev->name, cec);
if (ret)
return ret;
cec->clk = devm_clk_get(dev, "cec-clk");
if (IS_ERR(cec->clk)) {
dev_err(dev, "Cannot get cec clock\n");
return PTR_ERR(cec->clk);
}
cec->adap = cec_allocate_adapter(&sti_cec_adap_ops, cec, CEC_NAME,
CEC_CAP_DEFAULTS |
CEC_CAP_CONNECTOR_INFO,
CEC_MAX_LOG_ADDRS);
ret = PTR_ERR_OR_ZERO(cec->adap);
if (ret)
return ret;
cec->notifier = cec_notifier_cec_adap_register(hdmi_dev, NULL,
cec->adap);
if (!cec->notifier) {
ret = -ENOMEM;
goto err_delete_adapter;
}
ret = cec_register_adapter(cec->adap, &pdev->dev);
if (ret)
goto err_notifier;
platform_set_drvdata(pdev, cec);
return 0;
err_notifier:
cec_notifier_cec_adap_unregister(cec->notifier, cec->adap);
err_delete_adapter:
cec_delete_adapter(cec->adap);
return ret;
}
static void stih_cec_remove(struct platform_device *pdev)
{
struct stih_cec *cec = platform_get_drvdata(pdev);
cec_notifier_cec_adap_unregister(cec->notifier, cec->adap);
cec_unregister_adapter(cec->adap);
}
static const struct of_device_id stih_cec_match[] = {
{
.compatible = "st,stih-cec",
},
{},
};
MODULE_DEVICE_TABLE(of, stih_cec_match);
static struct platform_driver stih_cec_pdrv = {
.probe = stih_cec_probe,
.remove_new = stih_cec_remove,
.driver = {
.name = CEC_NAME,
.of_match_table = stih_cec_match,
},
};
module_platform_driver(stih_cec_pdrv);
MODULE_AUTHOR("Benjamin Gaignard <[email protected]>");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("STIH4xx CEC driver");
| linux-master | drivers/media/cec/platform/sti/stih-cec.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Driver for the Chrontel CH7322 CEC Controller
*
* Copyright 2020 Google LLC.
*/
/*
* Notes
*
* - This device powers on in Auto Mode which has limited functionality. This
* driver disables Auto Mode when it attaches.
*
*/
#include <linux/cec.h>
#include <linux/dmi.h>
#include <linux/i2c.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/pci.h>
#include <linux/regmap.h>
#include <media/cec.h>
#include <media/cec-notifier.h>
#define CH7322_WRITE 0x00
#define CH7322_WRITE_MSENT 0x80
#define CH7322_WRITE_BOK 0x40
#define CH7322_WRITE_NMASK 0x0f
/* Write buffer is 0x01-0x10 */
#define CH7322_WRBUF 0x01
#define CH7322_WRBUF_LEN 0x10
#define CH7322_READ 0x40
#define CH7322_READ_NRDT 0x80
#define CH7322_READ_MSENT 0x20
#define CH7322_READ_NMASK 0x0f
/* Read buffer is 0x41-0x50 */
#define CH7322_RDBUF 0x41
#define CH7322_RDBUF_LEN 0x10
#define CH7322_MODE 0x11
#define CH7322_MODE_AUTO 0x78
#define CH7322_MODE_SW 0xb5
#define CH7322_RESET 0x12
#define CH7322_RESET_RST 0x00
#define CH7322_POWER 0x13
#define CH7322_POWER_FPD 0x04
#define CH7322_CFG0 0x17
#define CH7322_CFG0_EOBEN 0x40
#define CH7322_CFG0_PEOB 0x20
#define CH7322_CFG0_CLRSPP 0x10
#define CH7322_CFG0_FLOW 0x08
#define CH7322_CFG1 0x1a
#define CH7322_CFG1_STDBYO 0x04
#define CH7322_CFG1_HPBP 0x02
#define CH7322_CFG1_PIO 0x01
#define CH7322_INTCTL 0x1b
#define CH7322_INTCTL_INTPB 0x80
#define CH7322_INTCTL_STDBY 0x40
#define CH7322_INTCTL_HPDFALL 0x20
#define CH7322_INTCTL_HPDRISE 0x10
#define CH7322_INTCTL_RXMSG 0x08
#define CH7322_INTCTL_TXMSG 0x04
#define CH7322_INTCTL_NEWPHA 0x02
#define CH7322_INTCTL_ERROR 0x01
#define CH7322_DVCLKFNH 0x1d
#define CH7322_DVCLKFNL 0x1e
#define CH7322_CTL 0x31
#define CH7322_CTL_FSTDBY 0x80
#define CH7322_CTL_PLSEN 0x40
#define CH7322_CTL_PLSPB 0x20
#define CH7322_CTL_SPADL 0x10
#define CH7322_CTL_HINIT 0x08
#define CH7322_CTL_WPHYA 0x04
#define CH7322_CTL_H1T 0x02
#define CH7322_CTL_S1T 0x01
#define CH7322_PAWH 0x32
#define CH7322_PAWL 0x33
#define CH7322_ADDLW 0x34
#define CH7322_ADDLW_MASK 0xf0
#define CH7322_ADDLR 0x3d
#define CH7322_ADDLR_HPD 0x80
#define CH7322_ADDLR_MASK 0x0f
#define CH7322_INTDATA 0x3e
#define CH7322_INTDATA_MODE 0x80
#define CH7322_INTDATA_STDBY 0x40
#define CH7322_INTDATA_HPDFALL 0x20
#define CH7322_INTDATA_HPDRISE 0x10
#define CH7322_INTDATA_RXMSG 0x08
#define CH7322_INTDATA_TXMSG 0x04
#define CH7322_INTDATA_NEWPHA 0x02
#define CH7322_INTDATA_ERROR 0x01
#define CH7322_EVENT 0x3f
#define CH7322_EVENT_TXERR 0x80
#define CH7322_EVENT_HRST 0x40
#define CH7322_EVENT_HFST 0x20
#define CH7322_EVENT_PHACHG 0x10
#define CH7322_EVENT_ACTST 0x08
#define CH7322_EVENT_PHARDY 0x04
#define CH7322_EVENT_BSOK 0x02
#define CH7322_EVENT_ERRADCF 0x01
#define CH7322_DID 0x51
#define CH7322_DID_CH7322 0x5b
#define CH7322_DID_CH7323 0x5f
#define CH7322_REVISIONID 0x52
#define CH7322_PARH 0x53
#define CH7322_PARL 0x54
#define CH7322_IOCFG2 0x75
#define CH7322_IOCFG_CIO 0x80
#define CH7322_IOCFG_IOCFGMASK 0x78
#define CH7322_IOCFG_AUDIO 0x04
#define CH7322_IOCFG_SPAMST 0x02
#define CH7322_IOCFG_SPAMSP 0x01
#define CH7322_CTL3 0x7b
#define CH7322_CTL3_SWENA 0x80
#define CH7322_CTL3_FC_INIT 0x40
#define CH7322_CTL3_SML_FL 0x20
#define CH7322_CTL3_SM_RDST 0x10
#define CH7322_CTL3_SPP_CIAH 0x08
#define CH7322_CTL3_SPP_CIAL 0x04
#define CH7322_CTL3_SPP_ACTH 0x02
#define CH7322_CTL3_SPP_ACTL 0x01
/* BOK status means NACK */
#define CH7322_TX_FLAG_NACK BIT(0)
/* Device will retry automatically */
#define CH7322_TX_FLAG_RETRY BIT(1)
struct ch7322 {
struct i2c_client *i2c;
struct regmap *regmap;
struct cec_adapter *cec;
struct mutex mutex; /* device access mutex */
u8 tx_flags;
};
static const struct regmap_config ch7322_regmap = {
.reg_bits = 8,
.val_bits = 8,
.max_register = 0x7f,
.disable_locking = true,
};
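/*
 * Illustrative note on the transmit protocol (derived from the register
 * definitions above): to send an n-byte CEC message, n - 1 is written to
 * CH7322_WRITE and the message bytes are placed in CH7322_WRBUF through
 * CH7322_WRBUF + n - 1, as ch7322_send_message() does below.
 */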
static int ch7322_send_message(struct ch7322 *ch7322, const struct cec_msg *msg)
{
unsigned int val;
unsigned int len = msg->len;
int ret;
int i;
WARN_ON(!mutex_is_locked(&ch7322->mutex));
if (len > CH7322_WRBUF_LEN || len < 1)
return -EINVAL;
ret = regmap_read(ch7322->regmap, CH7322_WRITE, &val);
if (ret)
return ret;
/* Buffer not ready */
if (!(val & CH7322_WRITE_MSENT))
return -EBUSY;
if (cec_msg_opcode(msg) == -1 &&
cec_msg_initiator(msg) == cec_msg_destination(msg)) {
ch7322->tx_flags = CH7322_TX_FLAG_NACK | CH7322_TX_FLAG_RETRY;
} else if (cec_msg_is_broadcast(msg)) {
ch7322->tx_flags = CH7322_TX_FLAG_NACK;
} else {
ch7322->tx_flags = CH7322_TX_FLAG_RETRY;
}
ret = regmap_write(ch7322->regmap, CH7322_WRITE, len - 1);
if (ret)
return ret;
for (i = 0; i < len; i++) {
ret = regmap_write(ch7322->regmap,
CH7322_WRBUF + i, msg->msg[i]);
if (ret)
return ret;
}
return 0;
}
static int ch7322_receive_message(struct ch7322 *ch7322, struct cec_msg *msg)
{
unsigned int val;
int ret = 0;
int i;
WARN_ON(!mutex_is_locked(&ch7322->mutex));
ret = regmap_read(ch7322->regmap, CH7322_READ, &val);
if (ret)
return ret;
/* Message not ready */
if (!(val & CH7322_READ_NRDT))
return -EIO;
msg->len = (val & CH7322_READ_NMASK) + 1;
/* Read entire RDBUF to clear state */
for (i = 0; i < CH7322_RDBUF_LEN; i++) {
ret = regmap_read(ch7322->regmap, CH7322_RDBUF + i, &val);
if (ret)
return ret;
msg->msg[i] = (u8)val;
}
return 0;
}
static void ch7322_tx_done(struct ch7322 *ch7322)
{
int ret;
unsigned int val;
u8 status, flags;
mutex_lock(&ch7322->mutex);
ret = regmap_read(ch7322->regmap, CH7322_WRITE, &val);
flags = ch7322->tx_flags;
mutex_unlock(&ch7322->mutex);
/*
* The device returns a one-bit OK status which usually means ACK but
* actually means NACK when sending a logical address query or a
* broadcast.
*/
if (ret)
status = CEC_TX_STATUS_ERROR;
else if ((val & CH7322_WRITE_BOK) && (flags & CH7322_TX_FLAG_NACK))
status = CEC_TX_STATUS_NACK;
else if (val & CH7322_WRITE_BOK)
status = CEC_TX_STATUS_OK;
else if (flags & CH7322_TX_FLAG_NACK)
status = CEC_TX_STATUS_OK;
else
status = CEC_TX_STATUS_NACK;
if (status == CEC_TX_STATUS_NACK && (flags & CH7322_TX_FLAG_RETRY))
status |= CEC_TX_STATUS_MAX_RETRIES;
cec_transmit_attempt_done(ch7322->cec, status);
}
static void ch7322_rx_done(struct ch7322 *ch7322)
{
struct cec_msg msg;
int ret;
mutex_lock(&ch7322->mutex);
ret = ch7322_receive_message(ch7322, &msg);
mutex_unlock(&ch7322->mutex);
if (ret)
dev_err(&ch7322->i2c->dev, "cec receive error: %d\n", ret);
else
cec_received_msg(ch7322->cec, &msg);
}
/*
* This device can either monitor the DDC lines to obtain the physical address
* or it can allow the host to program it. This driver lets the device obtain
* it.
*/
static void ch7322_phys_addr(struct ch7322 *ch7322)
{
unsigned int pah, pal;
int ret = 0;
mutex_lock(&ch7322->mutex);
ret |= regmap_read(ch7322->regmap, CH7322_PARH, &pah);
ret |= regmap_read(ch7322->regmap, CH7322_PARL, &pal);
mutex_unlock(&ch7322->mutex);
if (ret)
dev_err(&ch7322->i2c->dev, "phys addr error\n");
else
cec_s_phys_addr(ch7322->cec, pal | (pah << 8), false);
}
static irqreturn_t ch7322_irq(int irq, void *dev)
{
struct ch7322 *ch7322 = dev;
unsigned int data = 0;
mutex_lock(&ch7322->mutex);
regmap_read(ch7322->regmap, CH7322_INTDATA, &data);
regmap_write(ch7322->regmap, CH7322_INTDATA, data);
mutex_unlock(&ch7322->mutex);
if (data & CH7322_INTDATA_HPDFALL)
cec_phys_addr_invalidate(ch7322->cec);
if (data & CH7322_INTDATA_TXMSG)
ch7322_tx_done(ch7322);
if (data & CH7322_INTDATA_RXMSG)
ch7322_rx_done(ch7322);
if (data & CH7322_INTDATA_NEWPHA)
ch7322_phys_addr(ch7322);
if (data & CH7322_INTDATA_ERROR)
dev_dbg(&ch7322->i2c->dev, "unknown error\n");
return IRQ_HANDLED;
}
/* This device is always enabled */
static int ch7322_cec_adap_enable(struct cec_adapter *adap, bool enable)
{
return 0;
}
static int ch7322_cec_adap_log_addr(struct cec_adapter *adap, u8 log_addr)
{
struct ch7322 *ch7322 = cec_get_drvdata(adap);
int ret;
mutex_lock(&ch7322->mutex);
ret = regmap_update_bits(ch7322->regmap, CH7322_ADDLW,
CH7322_ADDLW_MASK, log_addr << 4);
mutex_unlock(&ch7322->mutex);
return ret;
}
static int ch7322_cec_adap_transmit(struct cec_adapter *adap, u8 attempts,
u32 signal_free_time, struct cec_msg *msg)
{
struct ch7322 *ch7322 = cec_get_drvdata(adap);
int ret;
mutex_lock(&ch7322->mutex);
ret = ch7322_send_message(ch7322, msg);
mutex_unlock(&ch7322->mutex);
return ret;
}
static const struct cec_adap_ops ch7322_cec_adap_ops = {
.adap_enable = ch7322_cec_adap_enable,
.adap_log_addr = ch7322_cec_adap_log_addr,
.adap_transmit = ch7322_cec_adap_transmit,
};
#if IS_ENABLED(CONFIG_PCI) && IS_ENABLED(CONFIG_DMI)
struct ch7322_conn_match {
const char *dev_name;
const char *pci_name;
const char *port_name;
};
static struct ch7322_conn_match google_endeavour[] = {
{ "i2c-PRP0001:00", "0000:00:02.0", "Port B" },
{ "i2c-PRP0001:01", "0000:00:02.0", "Port C" },
{ },
};
static const struct dmi_system_id ch7322_dmi_table[] = {
{
.matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "Google"),
DMI_MATCH(DMI_BOARD_NAME, "Endeavour"),
},
.driver_data = google_endeavour,
},
{ },
};
/* Make a best-effort attempt to locate a matching HDMI port */
static int ch7322_get_port(struct i2c_client *client,
struct device **dev,
const char **port)
{
const struct dmi_system_id *system;
const struct ch7322_conn_match *conn;
*dev = NULL;
*port = NULL;
system = dmi_first_match(ch7322_dmi_table);
if (!system)
return 0;
for (conn = system->driver_data; conn->dev_name; conn++) {
if (!strcmp(dev_name(&client->dev), conn->dev_name)) {
struct device *d;
d = bus_find_device_by_name(&pci_bus_type, NULL,
conn->pci_name);
if (!d)
return -EPROBE_DEFER;
put_device(d);
*dev = d;
*port = conn->port_name;
return 0;
}
}
return 0;
}
#else
static int ch7322_get_port(struct i2c_client *client,
struct device **dev,
const char **port)
{
*dev = NULL;
*port = NULL;
return 0;
}
#endif
static int ch7322_probe(struct i2c_client *client)
{
struct device *hdmi_dev;
const char *port_name;
struct ch7322 *ch7322;
struct cec_notifier *notifier = NULL;
u32 caps = CEC_CAP_DEFAULTS;
int ret;
unsigned int val;
ret = ch7322_get_port(client, &hdmi_dev, &port_name);
if (ret)
return ret;
if (hdmi_dev)
caps |= CEC_CAP_CONNECTOR_INFO;
ch7322 = devm_kzalloc(&client->dev, sizeof(*ch7322), GFP_KERNEL);
if (!ch7322)
return -ENOMEM;
ch7322->regmap = devm_regmap_init_i2c(client, &ch7322_regmap);
if (IS_ERR(ch7322->regmap))
return PTR_ERR(ch7322->regmap);
ret = regmap_read(ch7322->regmap, CH7322_DID, &val);
if (ret)
return ret;
if (val != CH7322_DID_CH7322)
return -EOPNOTSUPP;
mutex_init(&ch7322->mutex);
ch7322->i2c = client;
ch7322->tx_flags = 0;
i2c_set_clientdata(client, ch7322);
/* Disable auto mode */
ret = regmap_write(ch7322->regmap, CH7322_MODE, CH7322_MODE_SW);
if (ret)
goto err_mutex;
/* Enable logical address register */
ret = regmap_update_bits(ch7322->regmap, CH7322_CTL,
CH7322_CTL_SPADL, CH7322_CTL_SPADL);
if (ret)
goto err_mutex;
ch7322->cec = cec_allocate_adapter(&ch7322_cec_adap_ops, ch7322,
dev_name(&client->dev),
caps, 1);
if (IS_ERR(ch7322->cec)) {
ret = PTR_ERR(ch7322->cec);
goto err_mutex;
}
ch7322->cec->adap_controls_phys_addr = true;
if (hdmi_dev) {
notifier = cec_notifier_cec_adap_register(hdmi_dev,
port_name,
ch7322->cec);
if (!notifier) {
ret = -ENOMEM;
goto err_cec;
}
}
/* Configure, mask, and clear interrupt */
ret = regmap_write(ch7322->regmap, CH7322_CFG1, 0);
if (ret)
goto err_notifier;
ret = regmap_write(ch7322->regmap, CH7322_INTCTL, CH7322_INTCTL_INTPB);
if (ret)
goto err_notifier;
ret = regmap_write(ch7322->regmap, CH7322_INTDATA, 0xff);
if (ret)
goto err_notifier;
/* If HPD is up read physical address */
ret = regmap_read(ch7322->regmap, CH7322_ADDLR, &val);
if (ret)
goto err_notifier;
if (val & CH7322_ADDLR_HPD)
ch7322_phys_addr(ch7322);
ret = devm_request_threaded_irq(&client->dev, client->irq, NULL,
ch7322_irq,
IRQF_ONESHOT | IRQF_TRIGGER_RISING,
client->name, ch7322);
if (ret)
goto err_notifier;
/* Unmask interrupt */
mutex_lock(&ch7322->mutex);
ret = regmap_write(ch7322->regmap, CH7322_INTCTL, 0xff);
mutex_unlock(&ch7322->mutex);
if (ret)
goto err_notifier;
ret = cec_register_adapter(ch7322->cec, &client->dev);
if (ret)
goto err_notifier;
dev_info(&client->dev, "device registered\n");
return 0;
err_notifier:
if (notifier)
cec_notifier_cec_adap_unregister(notifier, ch7322->cec);
err_cec:
cec_delete_adapter(ch7322->cec);
err_mutex:
mutex_destroy(&ch7322->mutex);
return ret;
}
static void ch7322_remove(struct i2c_client *client)
{
struct ch7322 *ch7322 = i2c_get_clientdata(client);
/* Mask interrupt */
mutex_lock(&ch7322->mutex);
regmap_write(ch7322->regmap, CH7322_INTCTL, CH7322_INTCTL_INTPB);
mutex_unlock(&ch7322->mutex);
cec_unregister_adapter(ch7322->cec);
mutex_destroy(&ch7322->mutex);
dev_info(&client->dev, "device unregistered\n");
}
static const struct of_device_id ch7322_of_match[] = {
{ .compatible = "chrontel,ch7322", },
{},
};
MODULE_DEVICE_TABLE(of, ch7322_of_match);
static struct i2c_driver ch7322_i2c_driver = {
.driver = {
.name = "ch7322",
.of_match_table = ch7322_of_match,
},
.probe = ch7322_probe,
.remove = ch7322_remove,
};
module_i2c_driver(ch7322_i2c_driver);
MODULE_DESCRIPTION("Chrontel CH7322 CEC Controller Driver");
MODULE_AUTHOR("Jeff Chase <[email protected]>");
MODULE_LICENSE("GPL");
| linux-master | drivers/media/cec/i2c/ch7322.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Pulse Eight HDMI CEC driver
*
 * Copyright 2016 Hans Verkuil <[email protected]>
*/
/*
* Notes:
*
* - Devices with firmware version < 2 do not store their configuration in
* EEPROM.
*
* - In autonomous mode, only messages from a TV will be acknowledged, even
* polling messages. Upon receiving a message from a TV, the dongle will
* respond to messages from any logical address.
*
* - In autonomous mode, the dongle will by default reply Feature Abort
* [Unrecognized Opcode] when it receives Give Device Vendor ID. It will
 * however observe vendor IDs reported by other devices and possibly
 * alter this behavior. When TVs (and TVs only) report that their vendor ID
* is LG (0x00e091), the dongle will itself reply that it has the same vendor
* ID, and it will respond to at least one vendor specific command.
*
* - In autonomous mode, the dongle is known to attempt wakeup if it receives
* <User Control Pressed> ["Power On"], ["Power] or ["Power Toggle"], or if it
* receives <Set Stream Path> with its own physical address. It also does this
* if it receives <Vendor Specific Command> [0x03 0x00] from an LG TV.
*/
#include <linux/completion.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/workqueue.h>
#include <linux/serio.h>
#include <linux/slab.h>
#include <linux/time.h>
#include <linux/delay.h>
#include <media/cec.h>
MODULE_AUTHOR("Hans Verkuil <[email protected]>");
MODULE_DESCRIPTION("Pulse Eight HDMI CEC driver");
MODULE_LICENSE("GPL");
static int debug;
static int persistent_config;
module_param(debug, int, 0644);
module_param(persistent_config, int, 0644);
MODULE_PARM_DESC(debug, "debug level (0-2)");
MODULE_PARM_DESC(persistent_config, "read config from persistent memory (0-1)");
enum pulse8_msgcodes {
MSGCODE_NOTHING = 0,
MSGCODE_PING,
MSGCODE_TIMEOUT_ERROR,
MSGCODE_HIGH_ERROR,
MSGCODE_LOW_ERROR,
MSGCODE_FRAME_START,
MSGCODE_FRAME_DATA,
MSGCODE_RECEIVE_FAILED,
MSGCODE_COMMAND_ACCEPTED, /* 0x08 */
MSGCODE_COMMAND_REJECTED,
MSGCODE_SET_ACK_MASK,
MSGCODE_TRANSMIT,
MSGCODE_TRANSMIT_EOM,
MSGCODE_TRANSMIT_IDLETIME,
MSGCODE_TRANSMIT_ACK_POLARITY,
MSGCODE_TRANSMIT_LINE_TIMEOUT,
MSGCODE_TRANSMIT_SUCCEEDED, /* 0x10 */
MSGCODE_TRANSMIT_FAILED_LINE,
MSGCODE_TRANSMIT_FAILED_ACK,
MSGCODE_TRANSMIT_FAILED_TIMEOUT_DATA,
MSGCODE_TRANSMIT_FAILED_TIMEOUT_LINE,
MSGCODE_FIRMWARE_VERSION,
MSGCODE_START_BOOTLOADER,
MSGCODE_GET_BUILDDATE,
MSGCODE_SET_CONTROLLED, /* 0x18 */
MSGCODE_GET_AUTO_ENABLED,
MSGCODE_SET_AUTO_ENABLED,
MSGCODE_GET_DEFAULT_LOGICAL_ADDRESS,
MSGCODE_SET_DEFAULT_LOGICAL_ADDRESS,
MSGCODE_GET_LOGICAL_ADDRESS_MASK,
MSGCODE_SET_LOGICAL_ADDRESS_MASK,
MSGCODE_GET_PHYSICAL_ADDRESS,
MSGCODE_SET_PHYSICAL_ADDRESS, /* 0x20 */
MSGCODE_GET_DEVICE_TYPE,
MSGCODE_SET_DEVICE_TYPE,
MSGCODE_GET_HDMI_VERSION, /* Removed in FW >= 10 */
MSGCODE_SET_HDMI_VERSION,
MSGCODE_GET_OSD_NAME,
MSGCODE_SET_OSD_NAME,
MSGCODE_WRITE_EEPROM,
MSGCODE_GET_ADAPTER_TYPE, /* 0x28 */
MSGCODE_SET_ACTIVE_SOURCE,
MSGCODE_GET_AUTO_POWER_ON, /* New for FW >= 10 */
MSGCODE_SET_AUTO_POWER_ON,
MSGCODE_FRAME_EOM = 0x80,
MSGCODE_FRAME_ACK = 0x40,
};
static const char * const pulse8_msgnames[] = {
"NOTHING",
"PING",
"TIMEOUT_ERROR",
"HIGH_ERROR",
"LOW_ERROR",
"FRAME_START",
"FRAME_DATA",
"RECEIVE_FAILED",
"COMMAND_ACCEPTED",
"COMMAND_REJECTED",
"SET_ACK_MASK",
"TRANSMIT",
"TRANSMIT_EOM",
"TRANSMIT_IDLETIME",
"TRANSMIT_ACK_POLARITY",
"TRANSMIT_LINE_TIMEOUT",
"TRANSMIT_SUCCEEDED",
"TRANSMIT_FAILED_LINE",
"TRANSMIT_FAILED_ACK",
"TRANSMIT_FAILED_TIMEOUT_DATA",
"TRANSMIT_FAILED_TIMEOUT_LINE",
"FIRMWARE_VERSION",
"START_BOOTLOADER",
"GET_BUILDDATE",
"SET_CONTROLLED",
"GET_AUTO_ENABLED",
"SET_AUTO_ENABLED",
"GET_DEFAULT_LOGICAL_ADDRESS",
"SET_DEFAULT_LOGICAL_ADDRESS",
"GET_LOGICAL_ADDRESS_MASK",
"SET_LOGICAL_ADDRESS_MASK",
"GET_PHYSICAL_ADDRESS",
"SET_PHYSICAL_ADDRESS",
"GET_DEVICE_TYPE",
"SET_DEVICE_TYPE",
"GET_HDMI_VERSION",
"SET_HDMI_VERSION",
"GET_OSD_NAME",
"SET_OSD_NAME",
"WRITE_EEPROM",
"GET_ADAPTER_TYPE",
"SET_ACTIVE_SOURCE",
"GET_AUTO_POWER_ON",
"SET_AUTO_POWER_ON",
};
static const char *pulse8_msgname(u8 cmd)
{
static char unknown_msg[5];
if ((cmd & 0x3f) < ARRAY_SIZE(pulse8_msgnames))
return pulse8_msgnames[cmd & 0x3f];
snprintf(unknown_msg, sizeof(unknown_msg), "0x%02x", cmd);
return unknown_msg;
}
#define MSGSTART 0xff
#define MSGEND 0xfe
#define MSGESC 0xfd
#define MSGOFFSET 3
#define DATA_SIZE 256
#define PING_PERIOD (15 * HZ)
#define NUM_MSGS 8
struct pulse8 {
struct device *dev;
struct serio *serio;
struct cec_adapter *adap;
unsigned int vers;
struct delayed_work ping_eeprom_work;
struct work_struct irq_work;
struct cec_msg rx_msg[NUM_MSGS];
unsigned int rx_msg_cur_idx, rx_msg_num;
/* protect rx_msg_cur_idx and rx_msg_num */
spinlock_t msg_lock;
u8 new_rx_msg[CEC_MAX_MSG_SIZE];
u8 new_rx_msg_len;
struct work_struct tx_work;
u32 tx_done_status;
u32 tx_signal_free_time;
struct cec_msg tx_msg;
bool tx_msg_is_bcast;
struct completion cmd_done;
u8 data[DATA_SIZE];
unsigned int len;
u8 buf[DATA_SIZE];
unsigned int idx;
bool escape;
bool started;
/* locks access to the adapter */
struct mutex lock;
bool config_pending;
bool restoring_config;
bool autonomous;
};
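/*
 * Serial framing, as implemented by pulse8_send() and pulse8_interrupt():
 * every command is wrapped as MSGSTART <payload> MSGEND, and any payload
 * byte >= MSGESC is escaped as MSGESC followed by (byte - MSGOFFSET).
 * For example, a raw 0xff payload byte is sent as the pair 0xfd 0xfc.
 */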
static int pulse8_send(struct serio *serio, const u8 *command, u8 cmd_len)
{
int err = 0;
err = serio_write(serio, MSGSTART);
if (err)
return err;
for (; !err && cmd_len; command++, cmd_len--) {
if (*command >= MSGESC) {
err = serio_write(serio, MSGESC);
if (!err)
err = serio_write(serio, *command - MSGOFFSET);
} else {
err = serio_write(serio, *command);
}
}
if (!err)
err = serio_write(serio, MSGEND);
return err;
}
static int pulse8_send_and_wait_once(struct pulse8 *pulse8,
const u8 *cmd, u8 cmd_len,
u8 response, u8 size)
{
int err;
if (debug > 1)
dev_info(pulse8->dev, "transmit %s: %*ph\n",
pulse8_msgname(cmd[0]), cmd_len, cmd);
init_completion(&pulse8->cmd_done);
err = pulse8_send(pulse8->serio, cmd, cmd_len);
if (err)
return err;
if (!wait_for_completion_timeout(&pulse8->cmd_done, HZ))
return -ETIMEDOUT;
if ((pulse8->data[0] & 0x3f) == MSGCODE_COMMAND_REJECTED &&
cmd[0] != MSGCODE_SET_CONTROLLED &&
cmd[0] != MSGCODE_SET_AUTO_ENABLED &&
cmd[0] != MSGCODE_GET_BUILDDATE)
return -ENOTTY;
if (response &&
((pulse8->data[0] & 0x3f) != response || pulse8->len < size + 1)) {
dev_info(pulse8->dev, "transmit %s failed with %s\n",
pulse8_msgname(cmd[0]),
pulse8_msgname(pulse8->data[0]));
return -EIO;
}
return 0;
}
static int pulse8_send_and_wait(struct pulse8 *pulse8,
const u8 *cmd, u8 cmd_len, u8 response, u8 size)
{
u8 cmd_sc[2];
int err;
err = pulse8_send_and_wait_once(pulse8, cmd, cmd_len, response, size);
if (err != -ENOTTY)
return err;
cmd_sc[0] = MSGCODE_SET_CONTROLLED;
cmd_sc[1] = 1;
err = pulse8_send_and_wait_once(pulse8, cmd_sc, 2,
MSGCODE_COMMAND_ACCEPTED, 1);
if (!err)
err = pulse8_send_and_wait_once(pulse8, cmd, cmd_len,
response, size);
return err == -ENOTTY ? -EIO : err;
}
static void pulse8_tx_work_handler(struct work_struct *work)
{
struct pulse8 *pulse8 = container_of(work, struct pulse8, tx_work);
struct cec_msg *msg = &pulse8->tx_msg;
unsigned int i;
u8 cmd[2];
int err;
if (msg->len == 0)
return;
mutex_lock(&pulse8->lock);
cmd[0] = MSGCODE_TRANSMIT_IDLETIME;
cmd[1] = pulse8->tx_signal_free_time;
err = pulse8_send_and_wait(pulse8, cmd, 2,
MSGCODE_COMMAND_ACCEPTED, 1);
cmd[0] = MSGCODE_TRANSMIT_ACK_POLARITY;
cmd[1] = cec_msg_is_broadcast(msg);
pulse8->tx_msg_is_bcast = cec_msg_is_broadcast(msg);
if (!err)
err = pulse8_send_and_wait(pulse8, cmd, 2,
MSGCODE_COMMAND_ACCEPTED, 1);
cmd[0] = msg->len == 1 ? MSGCODE_TRANSMIT_EOM : MSGCODE_TRANSMIT;
cmd[1] = msg->msg[0];
if (!err)
err = pulse8_send_and_wait(pulse8, cmd, 2,
MSGCODE_COMMAND_ACCEPTED, 1);
if (!err && msg->len > 1) {
for (i = 1; !err && i < msg->len; i++) {
cmd[0] = ((i == msg->len - 1)) ?
MSGCODE_TRANSMIT_EOM : MSGCODE_TRANSMIT;
cmd[1] = msg->msg[i];
err = pulse8_send_and_wait(pulse8, cmd, 2,
MSGCODE_COMMAND_ACCEPTED, 1);
}
}
if (err && debug)
dev_info(pulse8->dev, "%s(0x%02x) failed with error %d for msg %*ph\n",
pulse8_msgname(cmd[0]), cmd[1],
err, msg->len, msg->msg);
msg->len = 0;
mutex_unlock(&pulse8->lock);
if (err)
cec_transmit_attempt_done(pulse8->adap, CEC_TX_STATUS_ERROR);
}
static void pulse8_irq_work_handler(struct work_struct *work)
{
struct pulse8 *pulse8 =
container_of(work, struct pulse8, irq_work);
unsigned long flags;
u32 status;
spin_lock_irqsave(&pulse8->msg_lock, flags);
while (pulse8->rx_msg_num) {
spin_unlock_irqrestore(&pulse8->msg_lock, flags);
if (debug)
dev_info(pulse8->dev, "adap received %*ph\n",
pulse8->rx_msg[pulse8->rx_msg_cur_idx].len,
pulse8->rx_msg[pulse8->rx_msg_cur_idx].msg);
cec_received_msg(pulse8->adap,
&pulse8->rx_msg[pulse8->rx_msg_cur_idx]);
spin_lock_irqsave(&pulse8->msg_lock, flags);
if (pulse8->rx_msg_num)
pulse8->rx_msg_num--;
pulse8->rx_msg_cur_idx =
(pulse8->rx_msg_cur_idx + 1) % NUM_MSGS;
}
spin_unlock_irqrestore(&pulse8->msg_lock, flags);
mutex_lock(&pulse8->lock);
status = pulse8->tx_done_status;
pulse8->tx_done_status = 0;
mutex_unlock(&pulse8->lock);
if (status)
cec_transmit_attempt_done(pulse8->adap, status);
}
static irqreturn_t pulse8_interrupt(struct serio *serio, unsigned char data,
unsigned int flags)
{
struct pulse8 *pulse8 = serio_get_drvdata(serio);
unsigned long irq_flags;
unsigned int idx;
if (!pulse8->started && data != MSGSTART)
return IRQ_HANDLED;
if (data == MSGESC) {
pulse8->escape = true;
return IRQ_HANDLED;
}
if (pulse8->escape) {
data += MSGOFFSET;
pulse8->escape = false;
} else if (data == MSGEND) {
u8 msgcode = pulse8->buf[0];
if (debug > 1)
dev_info(pulse8->dev, "received %s: %*ph\n",
pulse8_msgname(msgcode),
pulse8->idx, pulse8->buf);
switch (msgcode & 0x3f) {
case MSGCODE_FRAME_START:
/*
* Test if we are receiving a new msg when a previous
* message is still pending.
*/
if (!(msgcode & MSGCODE_FRAME_EOM)) {
pulse8->new_rx_msg_len = 1;
pulse8->new_rx_msg[0] = pulse8->buf[1];
break;
}
fallthrough;
case MSGCODE_FRAME_DATA:
if (pulse8->new_rx_msg_len < CEC_MAX_MSG_SIZE)
pulse8->new_rx_msg[pulse8->new_rx_msg_len++] =
pulse8->buf[1];
if (!(msgcode & MSGCODE_FRAME_EOM))
break;
spin_lock_irqsave(&pulse8->msg_lock, irq_flags);
idx = (pulse8->rx_msg_cur_idx + pulse8->rx_msg_num) %
NUM_MSGS;
if (pulse8->rx_msg_num == NUM_MSGS) {
dev_warn(pulse8->dev,
"message queue is full, dropping %*ph\n",
pulse8->new_rx_msg_len,
pulse8->new_rx_msg);
spin_unlock_irqrestore(&pulse8->msg_lock,
irq_flags);
pulse8->new_rx_msg_len = 0;
break;
}
pulse8->rx_msg_num++;
memcpy(pulse8->rx_msg[idx].msg, pulse8->new_rx_msg,
pulse8->new_rx_msg_len);
pulse8->rx_msg[idx].len = pulse8->new_rx_msg_len;
spin_unlock_irqrestore(&pulse8->msg_lock, irq_flags);
schedule_work(&pulse8->irq_work);
pulse8->new_rx_msg_len = 0;
break;
case MSGCODE_TRANSMIT_SUCCEEDED:
WARN_ON(pulse8->tx_done_status);
pulse8->tx_done_status = CEC_TX_STATUS_OK;
schedule_work(&pulse8->irq_work);
break;
case MSGCODE_TRANSMIT_FAILED_ACK:
/*
* A NACK for a broadcast message makes no sense, these
* seem to be spurious messages and are skipped.
*/
if (pulse8->tx_msg_is_bcast)
break;
WARN_ON(pulse8->tx_done_status);
pulse8->tx_done_status = CEC_TX_STATUS_NACK;
schedule_work(&pulse8->irq_work);
break;
case MSGCODE_TRANSMIT_FAILED_LINE:
case MSGCODE_TRANSMIT_FAILED_TIMEOUT_DATA:
case MSGCODE_TRANSMIT_FAILED_TIMEOUT_LINE:
WARN_ON(pulse8->tx_done_status);
pulse8->tx_done_status = CEC_TX_STATUS_ERROR;
schedule_work(&pulse8->irq_work);
break;
case MSGCODE_HIGH_ERROR:
case MSGCODE_LOW_ERROR:
case MSGCODE_RECEIVE_FAILED:
case MSGCODE_TIMEOUT_ERROR:
pulse8->new_rx_msg_len = 0;
break;
case MSGCODE_COMMAND_ACCEPTED:
case MSGCODE_COMMAND_REJECTED:
default:
if (pulse8->idx == 0)
break;
memcpy(pulse8->data, pulse8->buf, pulse8->idx);
pulse8->len = pulse8->idx;
complete(&pulse8->cmd_done);
break;
}
pulse8->idx = 0;
pulse8->started = false;
return IRQ_HANDLED;
} else if (data == MSGSTART) {
pulse8->idx = 0;
pulse8->started = true;
return IRQ_HANDLED;
}
if (pulse8->idx >= DATA_SIZE) {
dev_dbg(pulse8->dev,
"throwing away %d bytes of garbage\n", pulse8->idx);
pulse8->idx = 0;
}
pulse8->buf[pulse8->idx++] = data;
return IRQ_HANDLED;
}
static int pulse8_cec_adap_enable(struct cec_adapter *adap, bool enable)
{
struct pulse8 *pulse8 = cec_get_drvdata(adap);
u8 cmd[16];
int err;
mutex_lock(&pulse8->lock);
cmd[0] = MSGCODE_SET_CONTROLLED;
cmd[1] = enable;
err = pulse8_send_and_wait(pulse8, cmd, 2,
MSGCODE_COMMAND_ACCEPTED, 1);
if (!enable) {
pulse8->rx_msg_num = 0;
pulse8->tx_done_status = 0;
}
mutex_unlock(&pulse8->lock);
return enable ? err : 0;
}
static int pulse8_cec_adap_log_addr(struct cec_adapter *adap, u8 log_addr)
{
struct pulse8 *pulse8 = cec_get_drvdata(adap);
u16 mask = 0;
u16 pa = adap->phys_addr;
u8 cmd[16];
int err = 0;
mutex_lock(&pulse8->lock);
if (log_addr != CEC_LOG_ADDR_INVALID)
mask = 1 << log_addr;
cmd[0] = MSGCODE_SET_ACK_MASK;
cmd[1] = mask >> 8;
cmd[2] = mask & 0xff;
err = pulse8_send_and_wait(pulse8, cmd, 3,
MSGCODE_COMMAND_ACCEPTED, 0);
if ((err && mask != 0) || pulse8->restoring_config)
goto unlock;
cmd[0] = MSGCODE_SET_AUTO_ENABLED;
cmd[1] = log_addr == CEC_LOG_ADDR_INVALID ? 0 : 1;
err = pulse8_send_and_wait(pulse8, cmd, 2,
MSGCODE_COMMAND_ACCEPTED, 0);
if (err)
goto unlock;
pulse8->autonomous = cmd[1];
if (log_addr == CEC_LOG_ADDR_INVALID)
goto unlock;
cmd[0] = MSGCODE_SET_DEVICE_TYPE;
cmd[1] = adap->log_addrs.primary_device_type[0];
err = pulse8_send_and_wait(pulse8, cmd, 2,
MSGCODE_COMMAND_ACCEPTED, 0);
if (err)
goto unlock;
switch (adap->log_addrs.primary_device_type[0]) {
case CEC_OP_PRIM_DEVTYPE_TV:
mask = CEC_LOG_ADDR_MASK_TV;
break;
case CEC_OP_PRIM_DEVTYPE_RECORD:
mask = CEC_LOG_ADDR_MASK_RECORD;
break;
case CEC_OP_PRIM_DEVTYPE_TUNER:
mask = CEC_LOG_ADDR_MASK_TUNER;
break;
case CEC_OP_PRIM_DEVTYPE_PLAYBACK:
mask = CEC_LOG_ADDR_MASK_PLAYBACK;
break;
case CEC_OP_PRIM_DEVTYPE_AUDIOSYSTEM:
mask = CEC_LOG_ADDR_MASK_AUDIOSYSTEM;
break;
case CEC_OP_PRIM_DEVTYPE_SWITCH:
mask = CEC_LOG_ADDR_MASK_UNREGISTERED;
break;
case CEC_OP_PRIM_DEVTYPE_PROCESSOR:
mask = CEC_LOG_ADDR_MASK_SPECIFIC;
break;
default:
mask = 0;
break;
}
cmd[0] = MSGCODE_SET_LOGICAL_ADDRESS_MASK;
cmd[1] = mask >> 8;
cmd[2] = mask & 0xff;
err = pulse8_send_and_wait(pulse8, cmd, 3,
MSGCODE_COMMAND_ACCEPTED, 0);
if (err)
goto unlock;
cmd[0] = MSGCODE_SET_DEFAULT_LOGICAL_ADDRESS;
cmd[1] = log_addr;
err = pulse8_send_and_wait(pulse8, cmd, 2,
MSGCODE_COMMAND_ACCEPTED, 0);
if (err)
goto unlock;
cmd[0] = MSGCODE_SET_PHYSICAL_ADDRESS;
cmd[1] = pa >> 8;
cmd[2] = pa & 0xff;
err = pulse8_send_and_wait(pulse8, cmd, 3,
MSGCODE_COMMAND_ACCEPTED, 0);
if (err)
goto unlock;
if (pulse8->vers < 10) {
cmd[0] = MSGCODE_SET_HDMI_VERSION;
cmd[1] = adap->log_addrs.cec_version;
err = pulse8_send_and_wait(pulse8, cmd, 2,
MSGCODE_COMMAND_ACCEPTED, 0);
if (err)
goto unlock;
}
if (adap->log_addrs.osd_name[0]) {
size_t osd_len = strlen(adap->log_addrs.osd_name);
char *osd_str = cmd + 1;
cmd[0] = MSGCODE_SET_OSD_NAME;
strscpy(cmd + 1, adap->log_addrs.osd_name, sizeof(cmd) - 1);
if (osd_len < 4) {
memset(osd_str + osd_len, ' ', 4 - osd_len);
osd_len = 4;
osd_str[osd_len] = '\0';
strscpy(adap->log_addrs.osd_name, osd_str,
sizeof(adap->log_addrs.osd_name));
}
err = pulse8_send_and_wait(pulse8, cmd, 1 + osd_len,
MSGCODE_COMMAND_ACCEPTED, 0);
if (err)
goto unlock;
}
unlock:
if (pulse8->restoring_config)
pulse8->restoring_config = false;
else
pulse8->config_pending = true;
mutex_unlock(&pulse8->lock);
return log_addr == CEC_LOG_ADDR_INVALID ? 0 : err;
}
static int pulse8_cec_adap_transmit(struct cec_adapter *adap, u8 attempts,
u32 signal_free_time, struct cec_msg *msg)
{
struct pulse8 *pulse8 = cec_get_drvdata(adap);
pulse8->tx_msg = *msg;
if (debug)
dev_info(pulse8->dev, "adap transmit %*ph\n",
msg->len, msg->msg);
pulse8->tx_signal_free_time = signal_free_time;
schedule_work(&pulse8->tx_work);
return 0;
}
static void pulse8_cec_adap_free(struct cec_adapter *adap)
{
struct pulse8 *pulse8 = cec_get_drvdata(adap);
cancel_delayed_work_sync(&pulse8->ping_eeprom_work);
cancel_work_sync(&pulse8->irq_work);
cancel_work_sync(&pulse8->tx_work);
kfree(pulse8);
}
static const struct cec_adap_ops pulse8_cec_adap_ops = {
.adap_enable = pulse8_cec_adap_enable,
.adap_log_addr = pulse8_cec_adap_log_addr,
.adap_transmit = pulse8_cec_adap_transmit,
.adap_free = pulse8_cec_adap_free,
};
static void pulse8_disconnect(struct serio *serio)
{
struct pulse8 *pulse8 = serio_get_drvdata(serio);
cec_unregister_adapter(pulse8->adap);
serio_set_drvdata(serio, NULL);
serio_close(serio);
}
static int pulse8_setup(struct pulse8 *pulse8, struct serio *serio,
struct cec_log_addrs *log_addrs, u16 *pa)
{
u8 *data = pulse8->data + 1;
u8 cmd[2];
int err;
time64_t date;
pulse8->vers = 0;
cmd[0] = MSGCODE_FIRMWARE_VERSION;
err = pulse8_send_and_wait(pulse8, cmd, 1, cmd[0], 2);
if (err)
return err;
pulse8->vers = (data[0] << 8) | data[1];
dev_info(pulse8->dev, "Firmware version %04x\n", pulse8->vers);
if (pulse8->vers < 2) {
*pa = CEC_PHYS_ADDR_INVALID;
return 0;
}
cmd[0] = MSGCODE_GET_BUILDDATE;
err = pulse8_send_and_wait(pulse8, cmd, 1, cmd[0], 4);
if (err)
return err;
date = (data[0] << 24) | (data[1] << 16) | (data[2] << 8) | data[3];
dev_info(pulse8->dev, "Firmware build date %ptT\n", &date);
dev_dbg(pulse8->dev, "Persistent config:\n");
cmd[0] = MSGCODE_GET_AUTO_ENABLED;
err = pulse8_send_and_wait(pulse8, cmd, 1, cmd[0], 1);
if (err)
return err;
pulse8->autonomous = data[0];
dev_dbg(pulse8->dev, "Autonomous mode: %s",
data[0] ? "on" : "off");
if (pulse8->vers >= 10) {
cmd[0] = MSGCODE_GET_AUTO_POWER_ON;
err = pulse8_send_and_wait(pulse8, cmd, 1, cmd[0], 1);
if (!err)
dev_dbg(pulse8->dev, "Auto Power On: %s",
data[0] ? "on" : "off");
}
cmd[0] = MSGCODE_GET_DEVICE_TYPE;
err = pulse8_send_and_wait(pulse8, cmd, 1, cmd[0], 1);
if (err)
return err;
log_addrs->primary_device_type[0] = data[0];
dev_dbg(pulse8->dev, "Primary device type: %d\n", data[0]);
switch (log_addrs->primary_device_type[0]) {
case CEC_OP_PRIM_DEVTYPE_TV:
log_addrs->log_addr_type[0] = CEC_LOG_ADDR_TYPE_TV;
log_addrs->all_device_types[0] = CEC_OP_ALL_DEVTYPE_TV;
break;
case CEC_OP_PRIM_DEVTYPE_RECORD:
log_addrs->log_addr_type[0] = CEC_LOG_ADDR_TYPE_RECORD;
log_addrs->all_device_types[0] = CEC_OP_ALL_DEVTYPE_RECORD;
break;
case CEC_OP_PRIM_DEVTYPE_TUNER:
log_addrs->log_addr_type[0] = CEC_LOG_ADDR_TYPE_TUNER;
log_addrs->all_device_types[0] = CEC_OP_ALL_DEVTYPE_TUNER;
break;
case CEC_OP_PRIM_DEVTYPE_PLAYBACK:
log_addrs->log_addr_type[0] = CEC_LOG_ADDR_TYPE_PLAYBACK;
log_addrs->all_device_types[0] = CEC_OP_ALL_DEVTYPE_PLAYBACK;
break;
case CEC_OP_PRIM_DEVTYPE_AUDIOSYSTEM:
log_addrs->log_addr_type[0] = CEC_LOG_ADDR_TYPE_PLAYBACK;
log_addrs->all_device_types[0] = CEC_OP_ALL_DEVTYPE_AUDIOSYSTEM;
break;
case CEC_OP_PRIM_DEVTYPE_SWITCH:
log_addrs->log_addr_type[0] = CEC_LOG_ADDR_TYPE_UNREGISTERED;
log_addrs->all_device_types[0] = CEC_OP_ALL_DEVTYPE_SWITCH;
break;
case CEC_OP_PRIM_DEVTYPE_PROCESSOR:
log_addrs->log_addr_type[0] = CEC_LOG_ADDR_TYPE_SPECIFIC;
log_addrs->all_device_types[0] = CEC_OP_ALL_DEVTYPE_SWITCH;
break;
default:
log_addrs->log_addr_type[0] = CEC_LOG_ADDR_TYPE_UNREGISTERED;
log_addrs->all_device_types[0] = CEC_OP_ALL_DEVTYPE_SWITCH;
dev_info(pulse8->dev, "Unknown Primary Device Type: %d\n",
log_addrs->primary_device_type[0]);
break;
}
cmd[0] = MSGCODE_GET_LOGICAL_ADDRESS_MASK;
err = pulse8_send_and_wait(pulse8, cmd, 1, cmd[0], 2);
if (err)
return err;
log_addrs->log_addr_mask = (data[0] << 8) | data[1];
dev_dbg(pulse8->dev, "Logical address ACK mask: %x\n",
log_addrs->log_addr_mask);
if (log_addrs->log_addr_mask)
log_addrs->num_log_addrs = 1;
cmd[0] = MSGCODE_GET_PHYSICAL_ADDRESS;
err = pulse8_send_and_wait(pulse8, cmd, 1, cmd[0], 1);
if (err)
return err;
*pa = (data[0] << 8) | data[1];
dev_dbg(pulse8->dev, "Physical address: %x.%x.%x.%x\n",
cec_phys_addr_exp(*pa));
log_addrs->cec_version = CEC_OP_CEC_VERSION_1_4;
if (pulse8->vers < 10) {
cmd[0] = MSGCODE_GET_HDMI_VERSION;
err = pulse8_send_and_wait(pulse8, cmd, 1, cmd[0], 1);
if (err)
return err;
log_addrs->cec_version = data[0];
dev_dbg(pulse8->dev, "CEC version: %d\n", log_addrs->cec_version);
}
cmd[0] = MSGCODE_GET_OSD_NAME;
err = pulse8_send_and_wait(pulse8, cmd, 1, cmd[0], 0);
if (err)
return err;
strscpy(log_addrs->osd_name, data, sizeof(log_addrs->osd_name));
dev_dbg(pulse8->dev, "OSD name: %s\n", log_addrs->osd_name);
return 0;
}
static int pulse8_apply_persistent_config(struct pulse8 *pulse8,
struct cec_log_addrs *log_addrs,
u16 pa)
{
int err;
err = cec_s_log_addrs(pulse8->adap, log_addrs, false);
if (err)
return err;
cec_s_phys_addr(pulse8->adap, pa, false);
return 0;
}
static void pulse8_ping_eeprom_work_handler(struct work_struct *work)
{
struct pulse8 *pulse8 =
container_of(work, struct pulse8, ping_eeprom_work.work);
u8 cmd;
mutex_lock(&pulse8->lock);
cmd = MSGCODE_PING;
if (pulse8_send_and_wait(pulse8, &cmd, 1,
MSGCODE_COMMAND_ACCEPTED, 0)) {
dev_warn(pulse8->dev, "failed to ping EEPROM\n");
goto unlock;
}
if (pulse8->vers < 2)
goto unlock;
if (pulse8->config_pending && persistent_config) {
dev_dbg(pulse8->dev, "writing pending config to EEPROM\n");
cmd = MSGCODE_WRITE_EEPROM;
if (pulse8_send_and_wait(pulse8, &cmd, 1,
MSGCODE_COMMAND_ACCEPTED, 0))
dev_info(pulse8->dev, "failed to write pending config to EEPROM\n");
else
pulse8->config_pending = false;
}
unlock:
schedule_delayed_work(&pulse8->ping_eeprom_work, PING_PERIOD);
mutex_unlock(&pulse8->lock);
}
static int pulse8_connect(struct serio *serio, struct serio_driver *drv)
{
u32 caps = CEC_CAP_DEFAULTS | CEC_CAP_PHYS_ADDR | CEC_CAP_MONITOR_ALL;
struct pulse8 *pulse8;
int err = -ENOMEM;
struct cec_log_addrs log_addrs = {};
u16 pa = CEC_PHYS_ADDR_INVALID;
pulse8 = kzalloc(sizeof(*pulse8), GFP_KERNEL);
if (!pulse8)
return -ENOMEM;
pulse8->serio = serio;
pulse8->adap = cec_allocate_adapter(&pulse8_cec_adap_ops, pulse8,
dev_name(&serio->dev), caps, 1);
err = PTR_ERR_OR_ZERO(pulse8->adap);
if (err < 0) {
kfree(pulse8);
return err;
}
pulse8->dev = &serio->dev;
serio_set_drvdata(serio, pulse8);
INIT_WORK(&pulse8->irq_work, pulse8_irq_work_handler);
INIT_WORK(&pulse8->tx_work, pulse8_tx_work_handler);
INIT_DELAYED_WORK(&pulse8->ping_eeprom_work,
pulse8_ping_eeprom_work_handler);
mutex_init(&pulse8->lock);
spin_lock_init(&pulse8->msg_lock);
pulse8->config_pending = false;
err = serio_open(serio, drv);
if (err)
goto delete_adap;
err = pulse8_setup(pulse8, serio, &log_addrs, &pa);
if (err)
goto close_serio;
err = cec_register_adapter(pulse8->adap, &serio->dev);
if (err < 0)
goto close_serio;
pulse8->dev = &pulse8->adap->devnode.dev;
if (persistent_config && pulse8->autonomous) {
err = pulse8_apply_persistent_config(pulse8, &log_addrs, pa);
if (err)
goto close_serio;
pulse8->restoring_config = true;
}
schedule_delayed_work(&pulse8->ping_eeprom_work, PING_PERIOD);
return 0;
close_serio:
pulse8->serio = NULL;
serio_set_drvdata(serio, NULL);
serio_close(serio);
delete_adap:
cec_delete_adapter(pulse8->adap);
return err;
}
static const struct serio_device_id pulse8_serio_ids[] = {
{
.type = SERIO_RS232,
.proto = SERIO_PULSE8_CEC,
.id = SERIO_ANY,
.extra = SERIO_ANY,
},
{ 0 }
};
MODULE_DEVICE_TABLE(serio, pulse8_serio_ids);
static struct serio_driver pulse8_drv = {
.driver = {
.name = "pulse8-cec",
},
.description = "Pulse Eight HDMI CEC driver",
.id_table = pulse8_serio_ids,
.interrupt = pulse8_interrupt,
.connect = pulse8_connect,
.disconnect = pulse8_disconnect,
};
module_serio_driver(pulse8_drv);
| linux-master | drivers/media/cec/usb/pulse8/pulse8-cec.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* RainShadow Tech HDMI CEC driver
*
 * Copyright 2016 Hans Verkuil <[email protected]>
*/
/*
* Notes:
*
* The higher level protocols are currently disabled. This can be added
* later, similar to how this is done for the Pulse Eight CEC driver.
*
* Documentation of the protocol is available here:
*
* http://rainshadowtech.com/doc/HDMICECtoUSBandRS232v2.0.pdf
*/
#include <linux/completion.h>
#include <linux/ctype.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/serio.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/time.h>
#include <linux/workqueue.h>
#include <media/cec.h>
MODULE_AUTHOR("Hans Verkuil <[email protected]>");
MODULE_DESCRIPTION("RainShadow Tech HDMI CEC driver");
MODULE_LICENSE("GPL");
#define DATA_SIZE 256
struct rain {
struct device *dev;
struct serio *serio;
struct cec_adapter *adap;
struct completion cmd_done;
struct work_struct work;
/* Low-level ringbuffer, collecting incoming characters */
char buf[DATA_SIZE];
unsigned int buf_rd_idx;
unsigned int buf_wr_idx;
unsigned int buf_len;
spinlock_t buf_lock;
/* command buffer */
char cmd[DATA_SIZE];
unsigned int cmd_idx;
bool cmd_started;
/* reply to a command, only used to store the firmware version */
char cmd_reply[DATA_SIZE];
struct mutex write_lock;
};
static void rain_process_msg(struct rain *rain)
{
struct cec_msg msg = {};
const char *cmd = rain->cmd + 3;
int stat = -1;
for (; *cmd; cmd++) {
if (!isxdigit(*cmd))
continue;
if (isxdigit(cmd[0]) && isxdigit(cmd[1])) {
if (msg.len == CEC_MAX_MSG_SIZE)
break;
if (hex2bin(msg.msg + msg.len, cmd, 1))
continue;
msg.len++;
cmd++;
continue;
}
if (!cmd[1])
stat = hex_to_bin(cmd[0]);
break;
}
if (rain->cmd[0] == 'R') {
if (stat == 1 || stat == 2)
cec_received_msg(rain->adap, &msg);
return;
}
switch (stat) {
case 1:
cec_transmit_attempt_done(rain->adap, CEC_TX_STATUS_OK);
break;
case 2:
cec_transmit_attempt_done(rain->adap, CEC_TX_STATUS_NACK);
break;
default:
cec_transmit_attempt_done(rain->adap, CEC_TX_STATUS_LOW_DRIVE);
break;
}
}
static void rain_irq_work_handler(struct work_struct *work)
{
struct rain *rain =
container_of(work, struct rain, work);
while (true) {
unsigned long flags;
char data;
spin_lock_irqsave(&rain->buf_lock, flags);
if (!rain->buf_len) {
spin_unlock_irqrestore(&rain->buf_lock, flags);
break;
}
data = rain->buf[rain->buf_rd_idx];
rain->buf_len--;
rain->buf_rd_idx = (rain->buf_rd_idx + 1) & 0xff;
spin_unlock_irqrestore(&rain->buf_lock, flags);
if (!rain->cmd_started && data != '?')
continue;
switch (data) {
case '\r':
rain->cmd[rain->cmd_idx] = '\0';
dev_dbg(rain->dev, "received: %s\n", rain->cmd);
if (!memcmp(rain->cmd, "REC", 3) ||
!memcmp(rain->cmd, "STA", 3)) {
rain_process_msg(rain);
} else {
strscpy(rain->cmd_reply, rain->cmd,
sizeof(rain->cmd_reply));
complete(&rain->cmd_done);
}
rain->cmd_idx = 0;
rain->cmd_started = false;
break;
case '\n':
rain->cmd_idx = 0;
rain->cmd_started = false;
break;
case '?':
rain->cmd_idx = 0;
rain->cmd_started = true;
break;
default:
if (rain->cmd_idx >= DATA_SIZE - 1) {
dev_dbg(rain->dev,
"throwing away %d bytes of garbage\n", rain->cmd_idx);
rain->cmd_idx = 0;
}
rain->cmd[rain->cmd_idx++] = data;
break;
}
}
}
static irqreturn_t rain_interrupt(struct serio *serio, unsigned char data,
unsigned int flags)
{
struct rain *rain = serio_get_drvdata(serio);
if (rain->buf_len == DATA_SIZE) {
dev_warn_once(rain->dev, "buffer overflow\n");
return IRQ_HANDLED;
}
spin_lock(&rain->buf_lock);
rain->buf_len++;
rain->buf[rain->buf_wr_idx] = data;
rain->buf_wr_idx = (rain->buf_wr_idx + 1) & 0xff;
spin_unlock(&rain->buf_lock);
schedule_work(&rain->work);
return IRQ_HANDLED;
}
static void rain_disconnect(struct serio *serio)
{
struct rain *rain = serio_get_drvdata(serio);
cancel_work_sync(&rain->work);
cec_unregister_adapter(rain->adap);
dev_info(&serio->dev, "disconnected\n");
serio_close(serio);
serio_set_drvdata(serio, NULL);
kfree(rain);
}
static int rain_send(struct rain *rain, const char *command)
{
int err = serio_write(rain->serio, '!');
dev_dbg(rain->dev, "send: %s\n", command);
while (!err && *command)
err = serio_write(rain->serio, *command++);
if (!err)
err = serio_write(rain->serio, '~');
return err;
}
static int rain_send_and_wait(struct rain *rain,
const char *cmd, const char *reply)
{
int err;
init_completion(&rain->cmd_done);
mutex_lock(&rain->write_lock);
err = rain_send(rain, cmd);
if (err)
goto err;
if (!wait_for_completion_timeout(&rain->cmd_done, HZ)) {
err = -ETIMEDOUT;
goto err;
}
if (reply && strncmp(rain->cmd_reply, reply, strlen(reply))) {
dev_dbg(rain->dev,
"transmit of '%s': received '%s' instead of '%s'\n",
cmd, rain->cmd_reply, reply);
err = -EIO;
}
err:
mutex_unlock(&rain->write_lock);
return err;
}
static int rain_setup(struct rain *rain, struct serio *serio,
struct cec_log_addrs *log_addrs, u16 *pa)
{
int err;
err = rain_send_and_wait(rain, "R", "REV");
if (err)
return err;
dev_info(rain->dev, "Firmware version %s\n", rain->cmd_reply + 4);
err = rain_send_and_wait(rain, "Q 1", "QTY");
if (err)
return err;
err = rain_send_and_wait(rain, "c0000", "CFG");
if (err)
return err;
return rain_send_and_wait(rain, "A F 0000", "ADR");
}
static int rain_cec_adap_enable(struct cec_adapter *adap, bool enable)
{
return 0;
}
static int rain_cec_adap_log_addr(struct cec_adapter *adap, u8 log_addr)
{
struct rain *rain = cec_get_drvdata(adap);
u8 cmd[16];
if (log_addr == CEC_LOG_ADDR_INVALID)
log_addr = CEC_LOG_ADDR_UNREGISTERED;
snprintf(cmd, sizeof(cmd), "A %x", log_addr);
return rain_send_and_wait(rain, cmd, "ADR");
}
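/*
 * Worked example (derived from the formatting below and rain_send()):
 * transmitting the two-byte message {0x40, 0x04} (<Image View On> addressed
 * to the TV) builds the string "x0 04 ", which is framed on the wire as
 * "!x0 04 ~".
 */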
static int rain_cec_adap_transmit(struct cec_adapter *adap, u8 attempts,
u32 signal_free_time, struct cec_msg *msg)
{
struct rain *rain = cec_get_drvdata(adap);
char cmd[2 * CEC_MAX_MSG_SIZE + 16];
unsigned int i;
int err;
if (msg->len == 1) {
snprintf(cmd, sizeof(cmd), "x%x", cec_msg_destination(msg));
} else {
char hex[3];
snprintf(cmd, sizeof(cmd), "x%x %02x ",
cec_msg_destination(msg), msg->msg[1]);
for (i = 2; i < msg->len; i++) {
snprintf(hex, sizeof(hex), "%02x", msg->msg[i]);
strlcat(cmd, hex, sizeof(cmd));
}
}
mutex_lock(&rain->write_lock);
err = rain_send(rain, cmd);
mutex_unlock(&rain->write_lock);
return err;
}
static const struct cec_adap_ops rain_cec_adap_ops = {
.adap_enable = rain_cec_adap_enable,
.adap_log_addr = rain_cec_adap_log_addr,
.adap_transmit = rain_cec_adap_transmit,
};
static int rain_connect(struct serio *serio, struct serio_driver *drv)
{
u32 caps = CEC_CAP_DEFAULTS | CEC_CAP_PHYS_ADDR | CEC_CAP_MONITOR_ALL;
struct rain *rain;
int err = -ENOMEM;
struct cec_log_addrs log_addrs = {};
u16 pa = CEC_PHYS_ADDR_INVALID;
rain = kzalloc(sizeof(*rain), GFP_KERNEL);
if (!rain)
return -ENOMEM;
rain->serio = serio;
rain->adap = cec_allocate_adapter(&rain_cec_adap_ops, rain,
dev_name(&serio->dev), caps, 1);
err = PTR_ERR_OR_ZERO(rain->adap);
if (err < 0)
goto free_device;
rain->dev = &serio->dev;
serio_set_drvdata(serio, rain);
INIT_WORK(&rain->work, rain_irq_work_handler);
mutex_init(&rain->write_lock);
spin_lock_init(&rain->buf_lock);
err = serio_open(serio, drv);
if (err)
goto delete_adap;
err = rain_setup(rain, serio, &log_addrs, &pa);
if (err)
goto close_serio;
err = cec_register_adapter(rain->adap, &serio->dev);
if (err < 0)
goto close_serio;
rain->dev = &rain->adap->devnode.dev;
return 0;
close_serio:
serio_close(serio);
delete_adap:
cec_delete_adapter(rain->adap);
serio_set_drvdata(serio, NULL);
free_device:
kfree(rain);
return err;
}
static const struct serio_device_id rain_serio_ids[] = {
{
.type = SERIO_RS232,
.proto = SERIO_RAINSHADOW_CEC,
.id = SERIO_ANY,
.extra = SERIO_ANY,
},
{ 0 }
};
MODULE_DEVICE_TABLE(serio, rain_serio_ids);
static struct serio_driver rain_drv = {
.driver = {
.name = "rainshadow-cec",
},
.description = "RainShadow Tech HDMI CEC driver",
.id_table = rain_serio_ids,
.interrupt = rain_interrupt,
.connect = rain_connect,
.disconnect = rain_disconnect,
};
module_serio_driver(rain_drv);
| linux-master | drivers/media/cec/usb/rainshadow/rainshadow-cec.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* FSI hub master driver
*
* Copyright (C) IBM Corporation 2016
*/
#include <linux/delay.h>
#include <linux/fsi.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/slab.h>
#include "fsi-master.h"
#define FSI_ENGID_HUB_MASTER 0x1c
#define FSI_LINK_ENABLE_SETUP_TIME 10 /* in mS */
/*
* FSI hub master support
*
* A hub master increases the number of potential target devices that the
 * primary FSI master can access. Each link a primary master supports can in
 * turn be chained to a hub master with multiple links of its own.
*
* The hub is controlled by a set of control registers exposed as a regular fsi
* device (the hub->upstream device), and provides access to the downstream FSI
 * bus through an address range on the slave itself (->addr and ->size).
*
* [This differs from "cascaded" masters, which expose the entire downstream
* bus entirely through the fsi device address range, and so have a smaller
* accessible address space.]
*/
struct fsi_master_hub {
struct fsi_master master;
struct fsi_device *upstream;
uint32_t addr, size; /* slave-relative addr of */
/* master address space */
};
#define to_fsi_master_hub(m) container_of(m, struct fsi_master_hub, master)
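/*
 * Illustrative note: a read or write of address A on hub link N is forwarded
 * to the upstream slave at hub->addr + N * FSI_HUB_LINK_SIZE + A, as
 * implemented by hub_master_read() and hub_master_write() below.
 */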
static int hub_master_read(struct fsi_master *master, int link,
uint8_t id, uint32_t addr, void *val, size_t size)
{
struct fsi_master_hub *hub = to_fsi_master_hub(master);
if (id != 0)
return -EINVAL;
addr += hub->addr + (link * FSI_HUB_LINK_SIZE);
return fsi_slave_read(hub->upstream->slave, addr, val, size);
}
static int hub_master_write(struct fsi_master *master, int link,
uint8_t id, uint32_t addr, const void *val, size_t size)
{
struct fsi_master_hub *hub = to_fsi_master_hub(master);
if (id != 0)
return -EINVAL;
addr += hub->addr + (link * FSI_HUB_LINK_SIZE);
return fsi_slave_write(hub->upstream->slave, addr, val, size);
}
static int hub_master_break(struct fsi_master *master, int link)
{
uint32_t addr;
__be32 cmd;
addr = 0x4;
cmd = cpu_to_be32(0xc0de0000);
return hub_master_write(master, link, 0, addr, &cmd, sizeof(cmd));
}
static int hub_master_link_enable(struct fsi_master *master, int link,
bool enable)
{
struct fsi_master_hub *hub = to_fsi_master_hub(master);
int idx, bit;
__be32 reg;
int rc;
idx = link / 32;
bit = link % 32;
reg = cpu_to_be32(0x80000000 >> bit);
if (!enable)
return fsi_device_write(hub->upstream, FSI_MCENP0 + (4 * idx),
®, 4);
rc = fsi_device_write(hub->upstream, FSI_MSENP0 + (4 * idx), ®, 4);
if (rc)
return rc;
mdelay(FSI_LINK_ENABLE_SETUP_TIME);
return 0;
}
static void hub_master_release(struct device *dev)
{
struct fsi_master_hub *hub = to_fsi_master_hub(to_fsi_master(dev));
kfree(hub);
}
/* mmode encoders */
static inline u32 fsi_mmode_crs0(u32 x)
{
return (x & FSI_MMODE_CRS0MASK) << FSI_MMODE_CRS0SHFT;
}
static inline u32 fsi_mmode_crs1(u32 x)
{
return (x & FSI_MMODE_CRS1MASK) << FSI_MMODE_CRS1SHFT;
}
static int hub_master_init(struct fsi_master_hub *hub)
{
struct fsi_device *dev = hub->upstream;
__be32 reg;
int rc;
reg = cpu_to_be32(FSI_MRESP_RST_ALL_MASTER | FSI_MRESP_RST_ALL_LINK
| FSI_MRESP_RST_MCR | FSI_MRESP_RST_PYE);
rc = fsi_device_write(dev, FSI_MRESP0, ®, sizeof(reg));
if (rc)
return rc;
/* Initialize the MFSI (hub master) engine */
reg = cpu_to_be32(FSI_MRESP_RST_ALL_MASTER | FSI_MRESP_RST_ALL_LINK
| FSI_MRESP_RST_MCR | FSI_MRESP_RST_PYE);
rc = fsi_device_write(dev, FSI_MRESP0, ®, sizeof(reg));
if (rc)
return rc;
reg = cpu_to_be32(FSI_MECTRL_EOAE | FSI_MECTRL_P8_AUTO_TERM);
rc = fsi_device_write(dev, FSI_MECTRL, ®, sizeof(reg));
if (rc)
return rc;
reg = cpu_to_be32(FSI_MMODE_EIP | FSI_MMODE_ECRC | FSI_MMODE_EPC
| fsi_mmode_crs0(1) | fsi_mmode_crs1(1)
| FSI_MMODE_P8_TO_LSB);
rc = fsi_device_write(dev, FSI_MMODE, ®, sizeof(reg));
if (rc)
return rc;
reg = cpu_to_be32(0xffff0000);
rc = fsi_device_write(dev, FSI_MDLYR, ®, sizeof(reg));
if (rc)
return rc;
reg = cpu_to_be32(~0);
rc = fsi_device_write(dev, FSI_MSENP0, ®, sizeof(reg));
if (rc)
return rc;
/* Leave enabled long enough for master logic to set up */
mdelay(FSI_LINK_ENABLE_SETUP_TIME);
rc = fsi_device_write(dev, FSI_MCENP0, ®, sizeof(reg));
if (rc)
return rc;
rc = fsi_device_read(dev, FSI_MAEB, ®, sizeof(reg));
if (rc)
return rc;
reg = cpu_to_be32(FSI_MRESP_RST_ALL_MASTER | FSI_MRESP_RST_ALL_LINK);
rc = fsi_device_write(dev, FSI_MRESP0, ®, sizeof(reg));
if (rc)
return rc;
rc = fsi_device_read(dev, FSI_MLEVP0, ®, sizeof(reg));
if (rc)
return rc;
/* Reset the master bridge */
reg = cpu_to_be32(FSI_MRESB_RST_GEN);
rc = fsi_device_write(dev, FSI_MRESB0, ®, sizeof(reg));
if (rc)
return rc;
reg = cpu_to_be32(FSI_MRESB_RST_ERR);
return fsi_device_write(dev, FSI_MRESB0, ®, sizeof(reg));
}
static int hub_master_probe(struct device *dev)
{
struct fsi_device *fsi_dev = to_fsi_dev(dev);
struct fsi_master_hub *hub;
uint32_t reg, links;
__be32 __reg;
int rc;
rc = fsi_device_read(fsi_dev, FSI_MVER, &__reg, sizeof(__reg));
if (rc)
return rc;
reg = be32_to_cpu(__reg);
links = (reg >> 8) & 0xff;
dev_dbg(dev, "hub version %08x (%d links)\n", reg, links);
rc = fsi_slave_claim_range(fsi_dev->slave, FSI_HUB_LINK_OFFSET,
FSI_HUB_LINK_SIZE * links);
if (rc) {
dev_err(dev, "can't claim slave address range for links");
return rc;
}
hub = kzalloc(sizeof(*hub), GFP_KERNEL);
if (!hub) {
rc = -ENOMEM;
goto err_release;
}
hub->addr = FSI_HUB_LINK_OFFSET;
hub->size = FSI_HUB_LINK_SIZE * links;
hub->upstream = fsi_dev;
hub->master.dev.parent = dev;
hub->master.dev.release = hub_master_release;
hub->master.dev.of_node = of_node_get(dev_of_node(dev));
hub->master.n_links = links;
hub->master.read = hub_master_read;
hub->master.write = hub_master_write;
hub->master.send_break = hub_master_break;
hub->master.link_enable = hub_master_link_enable;
dev_set_drvdata(dev, hub);
hub_master_init(hub);
rc = fsi_master_register(&hub->master);
if (rc)
goto err_release;
/* At this point, fsi_master_register performs the device_initialize(),
* and holds the sole reference on master.dev. This means the device
* will be freed (via ->release) during any subsequent call to
* fsi_master_unregister. We add our own reference to it here, so we
* can perform cleanup (in _remove()) without it being freed before
* we're ready.
*/
get_device(&hub->master.dev);
return 0;
err_release:
fsi_slave_release_range(fsi_dev->slave, FSI_HUB_LINK_OFFSET,
FSI_HUB_LINK_SIZE * links);
return rc;
}
static int hub_master_remove(struct device *dev)
{
struct fsi_master_hub *hub = dev_get_drvdata(dev);
fsi_master_unregister(&hub->master);
fsi_slave_release_range(hub->upstream->slave, hub->addr, hub->size);
of_node_put(hub->master.dev.of_node);
/*
* master.dev will likely be ->release()ed after this, which free()s
* the hub
*/
put_device(&hub->master.dev);
return 0;
}
static const struct fsi_device_id hub_master_ids[] = {
{
.engine_type = FSI_ENGID_HUB_MASTER,
.version = FSI_VERSION_ANY,
},
{ 0 }
};
static struct fsi_driver hub_master_driver = {
.id_table = hub_master_ids,
.drv = {
.name = "fsi-master-hub",
.bus = &fsi_bus_type,
.probe = hub_master_probe,
.remove = hub_master_remove,
}
};
module_fsi_driver(hub_master_driver);
MODULE_LICENSE("GPL");
| linux-master | drivers/fsi/fsi-master-hub.c |
// SPDX-License-Identifier: GPL-2.0
#include <linux/device.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/fsi-sbefifo.h>
#include <linux/gfp.h>
#include <linux/idr.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/miscdevice.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/fsi-occ.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <asm/unaligned.h>
#define OCC_SRAM_BYTES 4096
#define OCC_CMD_DATA_BYTES 4090
#define OCC_RESP_DATA_BYTES 4089
#define OCC_P9_SRAM_CMD_ADDR 0xFFFBE000
#define OCC_P9_SRAM_RSP_ADDR 0xFFFBF000
#define OCC_P10_SRAM_CMD_ADDR 0xFFFFD000
#define OCC_P10_SRAM_RSP_ADDR 0xFFFFE000
#define OCC_P10_SRAM_MODE 0x58 /* Normal mode, OCB channel 2 */
#define OCC_TIMEOUT_MS 1000
#define OCC_CMD_IN_PRG_WAIT_MS 50
enum versions { occ_p9, occ_p10 };
struct occ {
struct device *dev;
struct device *sbefifo;
char name[32];
int idx;
bool platform_hwmon;
u8 sequence_number;
void *buffer;
void *client_buffer;
size_t client_buffer_size;
size_t client_response_size;
enum versions version;
struct miscdevice mdev;
struct mutex occ_lock;
};
#define to_occ(x) container_of((x), struct occ, mdev)
struct occ_response {
u8 seq_no;
u8 cmd_type;
u8 return_status;
__be16 data_length;
u8 data[OCC_RESP_DATA_BYTES + 2]; /* two bytes checksum */
} __packed;
struct occ_client {
struct occ *occ;
struct mutex lock;
size_t data_size;
size_t read_offset;
u8 *buffer;
};
#define to_client(x) container_of((x), struct occ_client, xfr)
static DEFINE_IDA(occ_ida);
static int occ_open(struct inode *inode, struct file *file)
{
struct occ_client *client = kzalloc(sizeof(*client), GFP_KERNEL);
struct miscdevice *mdev = file->private_data;
struct occ *occ = to_occ(mdev);
if (!client)
return -ENOMEM;
client->buffer = (u8 *)__get_free_page(GFP_KERNEL);
if (!client->buffer) {
kfree(client);
return -ENOMEM;
}
client->occ = occ;
mutex_init(&client->lock);
file->private_data = client;
get_device(occ->dev);
/* We allocate a 1-page buffer, make sure it all fits */
BUILD_BUG_ON((OCC_CMD_DATA_BYTES + 3) > PAGE_SIZE);
BUILD_BUG_ON((OCC_RESP_DATA_BYTES + 7) > PAGE_SIZE);
return 0;
}
static ssize_t occ_read(struct file *file, char __user *buf, size_t len,
loff_t *offset)
{
struct occ_client *client = file->private_data;
ssize_t rc = 0;
if (!client)
return -ENODEV;
if (len > OCC_SRAM_BYTES)
return -EINVAL;
mutex_lock(&client->lock);
/* This should not be possible ... */
if (WARN_ON_ONCE(client->read_offset > client->data_size)) {
rc = -EIO;
goto done;
}
/* Grab how much data we have to read */
rc = min(len, client->data_size - client->read_offset);
if (copy_to_user(buf, client->buffer + client->read_offset, rc))
rc = -EFAULT;
else
client->read_offset += rc;
done:
mutex_unlock(&client->lock);
return rc;
}
static ssize_t occ_write(struct file *file, const char __user *buf,
size_t len, loff_t *offset)
{
struct occ_client *client = file->private_data;
size_t rlen, data_length;
ssize_t rc;
u8 *cmd;
if (!client)
return -ENODEV;
if (len > (OCC_CMD_DATA_BYTES + 3) || len < 3)
return -EINVAL;
mutex_lock(&client->lock);
/* Construct the command */
cmd = client->buffer;
/*
* Copy the user command (assume user data follows the occ command
* format)
* byte 0: command type
* bytes 1-2: data length (msb first)
* bytes 3-n: data
*/
if (copy_from_user(&cmd[1], buf, len)) {
rc = -EFAULT;
goto done;
}
/* Extract data length */
data_length = (cmd[2] << 8) + cmd[3];
if (data_length > OCC_CMD_DATA_BYTES) {
rc = -EINVAL;
goto done;
}
/* Submit command; 4 bytes before the data and 2 bytes after */
rlen = PAGE_SIZE;
rc = fsi_occ_submit(client->occ->dev, cmd, data_length + 6, cmd,
&rlen);
if (rc)
goto done;
/* Set read tracking data */
client->data_size = rlen;
client->read_offset = 0;
/* Done */
rc = len;
done:
mutex_unlock(&client->lock);
return rc;
}
static int occ_release(struct inode *inode, struct file *file)
{
struct occ_client *client = file->private_data;
put_device(client->occ->dev);
free_page((unsigned long)client->buffer);
kfree(client);
return 0;
}
static const struct file_operations occ_fops = {
.owner = THIS_MODULE,
.open = occ_open,
.read = occ_read,
.write = occ_write,
.release = occ_release,
};
static void occ_save_ffdc(struct occ *occ, __be32 *resp, size_t parsed_len,
size_t resp_len)
{
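/*
 * Any response words beyond what the SBE status parsing consumed are FFDC
 * from the SBE; save as much as fits into the client's buffer so it can be
 * returned to userspace.
 */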
if (resp_len > parsed_len) {
size_t dh = resp_len - parsed_len;
size_t ffdc_len = (dh - 1) * 4; /* SBE words are four bytes */
__be32 *ffdc = &resp[parsed_len];
if (ffdc_len > occ->client_buffer_size)
ffdc_len = occ->client_buffer_size;
memcpy(occ->client_buffer, ffdc, ffdc_len);
occ->client_response_size = ffdc_len;
}
}
static int occ_verify_checksum(struct occ *occ, struct occ_response *resp,
u16 data_length)
{
/* Fetch the two bytes after the data for the checksum. */
u16 checksum_resp = get_unaligned_be16(&resp->data[data_length]);
u16 checksum;
u16 i;
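/* Recompute the checksum: a 16-bit sum over the header fields and data bytes. */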
checksum = resp->seq_no;
checksum += resp->cmd_type;
checksum += resp->return_status;
checksum += (data_length >> 8) + (data_length & 0xFF);
for (i = 0; i < data_length; ++i)
checksum += resp->data[i];
if (checksum != checksum_resp) {
dev_err(occ->dev, "Bad checksum: %04x!=%04x\n", checksum,
checksum_resp);
return -EBADE;
}
return 0;
}
static int occ_getsram(struct occ *occ, u32 offset, void *data, ssize_t len)
{
u32 data_len = ((len + 7) / 8) * 8; /* must be multiples of 8 B */
size_t cmd_len, parsed_len, resp_data_len;
size_t resp_len = OCC_MAX_RESP_WORDS;
__be32 *resp = occ->buffer;
__be32 cmd[6];
int idx = 0, rc;
/*
* Magic sequence to do SBE getsram command. SBE will fetch data from
* specified SRAM address.
*/
switch (occ->version) {
default:
case occ_p9:
cmd_len = 5;
cmd[2] = cpu_to_be32(1); /* Normal mode */
cmd[3] = cpu_to_be32(OCC_P9_SRAM_RSP_ADDR + offset);
break;
case occ_p10:
idx = 1;
cmd_len = 6;
cmd[2] = cpu_to_be32(OCC_P10_SRAM_MODE);
cmd[3] = 0;
cmd[4] = cpu_to_be32(OCC_P10_SRAM_RSP_ADDR + offset);
break;
}
cmd[0] = cpu_to_be32(cmd_len);
cmd[1] = cpu_to_be32(SBEFIFO_CMD_GET_OCC_SRAM);
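/* The last word of the request is the transfer length, rounded up to a multiple of 8 bytes. */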
cmd[4 + idx] = cpu_to_be32(data_len);
rc = sbefifo_submit(occ->sbefifo, cmd, cmd_len, resp, &resp_len);
if (rc)
return rc;
rc = sbefifo_parse_status(occ->sbefifo, SBEFIFO_CMD_GET_OCC_SRAM,
resp, resp_len, &parsed_len);
if (rc > 0) {
dev_err(occ->dev, "SRAM read returned failure status: %08x\n",
rc);
occ_save_ffdc(occ, resp, parsed_len, resp_len);
return -ECOMM;
} else if (rc) {
return rc;
}
resp_data_len = be32_to_cpu(resp[parsed_len - 1]);
if (resp_data_len != data_len) {
dev_err(occ->dev, "SRAM read expected %d bytes got %zd\n",
data_len, resp_data_len);
rc = -EBADMSG;
} else {
memcpy(data, resp, len);
}
return rc;
}
static int occ_putsram(struct occ *occ, const void *data, ssize_t len,
u8 seq_no, u16 checksum)
{
u32 data_len = ((len + 7) / 8) * 8; /* must be multiples of 8 B */
size_t cmd_len, parsed_len, resp_data_len;
size_t resp_len = OCC_MAX_RESP_WORDS;
__be32 *buf = occ->buffer;
u8 *byte_buf;
int idx = 0, rc;
cmd_len = (occ->version == occ_p10) ? 6 : 5;
cmd_len += data_len >> 2;
/*
* Magic sequence to do SBE putsram command. SBE will transfer
* data to specified SRAM address.
*/
buf[0] = cpu_to_be32(cmd_len);
buf[1] = cpu_to_be32(SBEFIFO_CMD_PUT_OCC_SRAM);
switch (occ->version) {
default:
case occ_p9:
buf[2] = cpu_to_be32(1); /* Normal mode */
buf[3] = cpu_to_be32(OCC_P9_SRAM_CMD_ADDR);
break;
case occ_p10:
idx = 1;
buf[2] = cpu_to_be32(OCC_P10_SRAM_MODE);
buf[3] = 0;
buf[4] = cpu_to_be32(OCC_P10_SRAM_CMD_ADDR);
break;
}
buf[4 + idx] = cpu_to_be32(data_len);
memcpy(&buf[5 + idx], data, len);
byte_buf = (u8 *)&buf[5 + idx];
/*
* Overwrite the first byte with our sequence number and the last two
* bytes with the checksum.
*/
byte_buf[0] = seq_no;
byte_buf[len - 2] = checksum >> 8;
byte_buf[len - 1] = checksum & 0xff;
rc = sbefifo_submit(occ->sbefifo, buf, cmd_len, buf, &resp_len);
if (rc)
return rc;
rc = sbefifo_parse_status(occ->sbefifo, SBEFIFO_CMD_PUT_OCC_SRAM,
buf, resp_len, &parsed_len);
if (rc > 0) {
dev_err(occ->dev, "SRAM write returned failure status: %08x\n",
rc);
occ_save_ffdc(occ, buf, parsed_len, resp_len);
return -ECOMM;
} else if (rc) {
return rc;
}
if (parsed_len != 1) {
dev_err(occ->dev, "SRAM write response length invalid: %zd\n",
parsed_len);
rc = -EBADMSG;
} else {
resp_data_len = be32_to_cpu(buf[0]);
if (resp_data_len != data_len) {
dev_err(occ->dev,
"SRAM write expected %d bytes got %zd\n",
data_len, resp_data_len);
rc = -EBADMSG;
}
}
return rc;
}
static int occ_trigger_attn(struct occ *occ)
{
__be32 *buf = occ->buffer;
size_t cmd_len, parsed_len, resp_data_len;
size_t resp_len = OCC_MAX_RESP_WORDS;
int idx = 0, rc;
switch (occ->version) {
default:
case occ_p9:
cmd_len = 7;
buf[2] = cpu_to_be32(3); /* Circular mode */
buf[3] = 0;
break;
case occ_p10:
idx = 1;
cmd_len = 8;
buf[2] = cpu_to_be32(0xd0); /* Circular mode, OCB Channel 1 */
buf[3] = 0;
buf[4] = 0;
break;
}
buf[0] = cpu_to_be32(cmd_len); /* Chip-op length in words */
buf[1] = cpu_to_be32(SBEFIFO_CMD_PUT_OCC_SRAM);
buf[4 + idx] = cpu_to_be32(8); /* Data length in bytes */
buf[5 + idx] = cpu_to_be32(0x20010000); /* Trigger OCC attention */
buf[6 + idx] = 0;
rc = sbefifo_submit(occ->sbefifo, buf, cmd_len, buf, &resp_len);
if (rc)
return rc;
rc = sbefifo_parse_status(occ->sbefifo, SBEFIFO_CMD_PUT_OCC_SRAM,
buf, resp_len, &parsed_len);
if (rc > 0) {
dev_err(occ->dev, "SRAM attn returned failure status: %08x\n",
rc);
occ_save_ffdc(occ, buf, parsed_len, resp_len);
return -ECOMM;
} else if (rc) {
return rc;
}
if (parsed_len != 1) {
dev_err(occ->dev, "SRAM attn response length invalid: %zd\n",
parsed_len);
rc = -EBADMSG;
} else {
resp_data_len = be32_to_cpu(buf[0]);
if (resp_data_len != 8) {
dev_err(occ->dev,
"SRAM attn expected 8 bytes got %zd\n",
resp_data_len);
rc = -EBADMSG;
}
}
return rc;
}
static bool fsi_occ_response_not_ready(struct occ_response *resp, u8 seq_no,
u8 cmd_type)
{
return resp->return_status == OCC_RESP_CMD_IN_PRG ||
resp->return_status == OCC_RESP_CRIT_INIT ||
resp->seq_no != seq_no || resp->cmd_type != cmd_type;
}
int fsi_occ_submit(struct device *dev, const void *request, size_t req_len,
void *response, size_t *resp_len)
{
const unsigned long timeout = msecs_to_jiffies(OCC_TIMEOUT_MS);
const unsigned long wait_time =
msecs_to_jiffies(OCC_CMD_IN_PRG_WAIT_MS);
struct occ *occ = dev_get_drvdata(dev);
struct occ_response *resp = response;
size_t user_resp_len = *resp_len;
u8 seq_no;
u8 cmd_type;
u16 checksum = 0;
u16 resp_data_length;
const u8 *byte_request = (const u8 *)request;
unsigned long end;
int rc;
size_t i;
*resp_len = 0;
if (!occ)
return -ENODEV;
if (user_resp_len < 7) {
dev_dbg(dev, "Bad resplen %zd\n", user_resp_len);
return -EINVAL;
}
cmd_type = byte_request[1];
/* Checksum the request, ignoring first byte (sequence number). */
for (i = 1; i < req_len - 2; ++i)
checksum += byte_request[i];
rc = mutex_lock_interruptible(&occ->occ_lock);
if (rc)
return rc;
occ->client_buffer = response;
occ->client_buffer_size = user_resp_len;
occ->client_response_size = 0;
if (!occ->buffer) {
rc = -ENOENT;
goto done;
}
/*
* Get a sequence number and update the counter. Avoid a sequence
* number of 0 which would pass the response check below even if the
* OCC response is uninitialized. Any sequence number the user is
* trying to send is overwritten since this function is the only common
* interface to the OCC and therefore the only place we can guarantee
* unique sequence numbers.
*/
seq_no = occ->sequence_number++;
if (!occ->sequence_number)
occ->sequence_number = 1;
checksum += seq_no;
rc = occ_putsram(occ, request, req_len, seq_no, checksum);
if (rc)
goto done;
rc = occ_trigger_attn(occ);
if (rc)
goto done;
end = jiffies + timeout;
while (true) {
/* Read occ response header */
rc = occ_getsram(occ, 0, resp, 8);
if (rc)
goto done;
if (fsi_occ_response_not_ready(resp, seq_no, cmd_type)) {
if (time_after(jiffies, end)) {
dev_err(occ->dev,
"resp timeout status=%02x seq=%d cmd=%d, our seq=%d cmd=%d\n",
resp->return_status, resp->seq_no,
resp->cmd_type, seq_no, cmd_type);
rc = -ETIMEDOUT;
goto done;
}
set_current_state(TASK_UNINTERRUPTIBLE);
schedule_timeout(wait_time);
} else {
/* Extract size of response data */
resp_data_length =
get_unaligned_be16(&resp->data_length);
/*
* Message size is data length + 5 bytes header + 2
* bytes checksum
*/
if ((resp_data_length + 7) > user_resp_len) {
rc = -EMSGSIZE;
goto done;
}
/*
* Get the entire response including the header again,
* in case it changed
*/
if (resp_data_length > 1) {
rc = occ_getsram(occ, 0, resp,
resp_data_length + 7);
if (rc)
goto done;
if (!fsi_occ_response_not_ready(resp, seq_no,
cmd_type))
break;
} else {
break;
}
}
}
dev_dbg(dev, "resp_status=%02x resp_data_len=%d\n",
resp->return_status, resp_data_length);
rc = occ_verify_checksum(occ, resp, resp_data_length);
if (rc)
goto done;
occ->client_response_size = resp_data_length + 7;
done:
*resp_len = occ->client_response_size;
mutex_unlock(&occ->occ_lock);
return rc;
}
EXPORT_SYMBOL_GPL(fsi_occ_submit);
static int occ_unregister_platform_child(struct device *dev, void *data)
{
struct platform_device *hwmon_dev = to_platform_device(dev);
platform_device_unregister(hwmon_dev);
return 0;
}
static int occ_unregister_of_child(struct device *dev, void *data)
{
struct platform_device *hwmon_dev = to_platform_device(dev);
of_device_unregister(hwmon_dev);
if (dev->of_node)
of_node_clear_flag(dev->of_node, OF_POPULATED);
return 0;
}
static int occ_probe(struct platform_device *pdev)
{
int rc;
u32 reg;
char child_name[32];
struct occ *occ;
struct platform_device *hwmon_dev = NULL;
struct device_node *hwmon_node;
struct device *dev = &pdev->dev;
struct platform_device_info hwmon_dev_info = {
.parent = dev,
.name = "occ-hwmon",
};
occ = devm_kzalloc(dev, sizeof(*occ), GFP_KERNEL);
if (!occ)
return -ENOMEM;
/* SBE words are always four bytes */
occ->buffer = kvmalloc(OCC_MAX_RESP_WORDS * 4, GFP_KERNEL);
if (!occ->buffer)
return -ENOMEM;
occ->version = (uintptr_t)of_device_get_match_data(dev);
occ->dev = dev;
occ->sbefifo = dev->parent;
/*
* Quickly derive a pseudo-random number from jiffies so that
* re-probing the driver doesn't accidentally overlap sequence numbers.
*/
occ->sequence_number = (u8)((jiffies % 0xff) + 1);
mutex_init(&occ->occ_lock);
if (dev->of_node) {
rc = of_property_read_u32(dev->of_node, "reg", ®);
if (!rc) {
/* make sure we don't have a duplicate from dts */
occ->idx = ida_simple_get(&occ_ida, reg, reg + 1,
GFP_KERNEL);
if (occ->idx < 0)
occ->idx = ida_simple_get(&occ_ida, 1, INT_MAX,
GFP_KERNEL);
} else {
occ->idx = ida_simple_get(&occ_ida, 1, INT_MAX,
GFP_KERNEL);
}
} else {
occ->idx = ida_simple_get(&occ_ida, 1, INT_MAX, GFP_KERNEL);
}
platform_set_drvdata(pdev, occ);
snprintf(occ->name, sizeof(occ->name), "occ%d", occ->idx);
occ->mdev.fops = &occ_fops;
occ->mdev.minor = MISC_DYNAMIC_MINOR;
occ->mdev.name = occ->name;
occ->mdev.parent = dev;
rc = misc_register(&occ->mdev);
if (rc) {
dev_err(dev, "failed to register miscdevice: %d\n", rc);
ida_simple_remove(&occ_ida, occ->idx);
kvfree(occ->buffer);
return rc;
}
hwmon_node = of_get_child_by_name(dev->of_node, hwmon_dev_info.name);
if (hwmon_node) {
snprintf(child_name, sizeof(child_name), "%s.%d", hwmon_dev_info.name, occ->idx);
hwmon_dev = of_platform_device_create(hwmon_node, child_name, dev);
of_node_put(hwmon_node);
}
if (!hwmon_dev) {
occ->platform_hwmon = true;
hwmon_dev_info.id = occ->idx;
hwmon_dev = platform_device_register_full(&hwmon_dev_info);
if (IS_ERR(hwmon_dev))
dev_warn(dev, "failed to create hwmon device\n");
}
return 0;
}
static int occ_remove(struct platform_device *pdev)
{
struct occ *occ = platform_get_drvdata(pdev);
misc_deregister(&occ->mdev);
mutex_lock(&occ->occ_lock);
kvfree(occ->buffer);
occ->buffer = NULL;
mutex_unlock(&occ->occ_lock);
if (occ->platform_hwmon)
device_for_each_child(&pdev->dev, NULL, occ_unregister_platform_child);
else
device_for_each_child(&pdev->dev, NULL, occ_unregister_of_child);
ida_simple_remove(&occ_ida, occ->idx);
return 0;
}
static const struct of_device_id occ_match[] = {
{
.compatible = "ibm,p9-occ",
.data = (void *)occ_p9
},
{
.compatible = "ibm,p10-occ",
.data = (void *)occ_p10
},
{ },
};
MODULE_DEVICE_TABLE(of, occ_match);
static struct platform_driver occ_driver = {
.driver = {
.name = "occ",
.of_match_table = occ_match,
},
.probe = occ_probe,
.remove = occ_remove,
};
static int occ_init(void)
{
return platform_driver_register(&occ_driver);
}
static void occ_exit(void)
{
platform_driver_unregister(&occ_driver);
ida_destroy(&occ_ida);
}
module_init(occ_init);
module_exit(occ_exit);
MODULE_AUTHOR("Eddie James <[email protected]>");
MODULE_DESCRIPTION("BMC P9 OCC driver");
MODULE_LICENSE("GPL");
| linux-master | drivers/fsi/fsi-occ.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* SCOM FSI Client device driver
*
* Copyright (C) IBM Corporation 2016
*/
#include <linux/fsi.h>
#include <linux/module.h>
#include <linux/cdev.h>
#include <linux/delay.h>
#include <linux/fs.h>
#include <linux/mod_devicetable.h>
#include <linux/uaccess.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <uapi/linux/fsi.h>
#define FSI_ENGID_SCOM 0x5
/* SCOM engine register set */
#define SCOM_DATA0_REG 0x00
#define SCOM_DATA1_REG 0x04
#define SCOM_CMD_REG 0x08
#define SCOM_FSI2PIB_RESET_REG 0x18
#define SCOM_STATUS_REG 0x1C /* Read */
#define SCOM_PIB_RESET_REG 0x1C /* Write */
/* Command register */
#define SCOM_WRITE_CMD 0x80000000
#define SCOM_READ_CMD 0x00000000
/* Status register bits */
#define SCOM_STATUS_ERR_SUMMARY 0x80000000
#define SCOM_STATUS_PROTECTION 0x01000000
#define SCOM_STATUS_PARITY 0x04000000
#define SCOM_STATUS_PIB_ABORT 0x00100000
#define SCOM_STATUS_PIB_RESP_MASK 0x00007000
#define SCOM_STATUS_PIB_RESP_SHIFT 12
#define SCOM_STATUS_FSI2PIB_ERROR (SCOM_STATUS_PROTECTION | \
SCOM_STATUS_PARITY | \
SCOM_STATUS_PIB_ABORT)
#define SCOM_STATUS_ANY_ERR (SCOM_STATUS_FSI2PIB_ERROR | \
SCOM_STATUS_PIB_RESP_MASK)
/* SCOM address encodings */
#define XSCOM_ADDR_IND_FLAG BIT_ULL(63)
#define XSCOM_ADDR_INF_FORM1 BIT_ULL(60)
/* SCOM indirect stuff */
#define XSCOM_ADDR_DIRECT_PART 0x7fffffffull
#define XSCOM_ADDR_INDIRECT_PART 0x000fffff00000000ull
#define XSCOM_DATA_IND_READ BIT_ULL(63)
#define XSCOM_DATA_IND_COMPLETE BIT_ULL(31)
#define XSCOM_DATA_IND_ERR_MASK 0x70000000ull
#define XSCOM_DATA_IND_ERR_SHIFT 28
#define XSCOM_DATA_IND_DATA 0x0000ffffull
#define XSCOM_DATA_IND_FORM1_DATA 0x000fffffffffffffull
#define XSCOM_ADDR_FORM1_LOW 0x000ffffffffull
#define XSCOM_ADDR_FORM1_HI 0xfff00000000ull
#define XSCOM_ADDR_FORM1_HI_SHIFT 20
/* Retries */
#define SCOM_MAX_IND_RETRIES 10 /* Retries indirect not ready */
struct scom_device {
struct list_head link;
struct fsi_device *fsi_dev;
struct device dev;
struct cdev cdev;
struct mutex lock;
bool dead;
};
static int __put_scom(struct scom_device *scom_dev, uint64_t value,
uint32_t addr, uint32_t *status)
{
__be32 data, raw_status;
int rc;
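/* Load the 64-bit SCOM value into the two 32-bit data registers, high word first. */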
data = cpu_to_be32((value >> 32) & 0xffffffff);
rc = fsi_device_write(scom_dev->fsi_dev, SCOM_DATA0_REG, &data,
sizeof(uint32_t));
if (rc)
return rc;
data = cpu_to_be32(value & 0xffffffff);
rc = fsi_device_write(scom_dev->fsi_dev, SCOM_DATA1_REG, &data,
sizeof(uint32_t));
if (rc)
return rc;
data = cpu_to_be32(SCOM_WRITE_CMD | addr);
rc = fsi_device_write(scom_dev->fsi_dev, SCOM_CMD_REG, &data,
sizeof(uint32_t));
if (rc)
return rc;
rc = fsi_device_read(scom_dev->fsi_dev, SCOM_STATUS_REG, &raw_status,
sizeof(uint32_t));
if (rc)
return rc;
*status = be32_to_cpu(raw_status);
return 0;
}
static int __get_scom(struct scom_device *scom_dev, uint64_t *value,
uint32_t addr, uint32_t *status)
{
__be32 data, raw_status;
int rc;
*value = 0ULL;
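/* Writing the address (with the read opcode) to the command register triggers the SCOM read. */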
data = cpu_to_be32(SCOM_READ_CMD | addr);
rc = fsi_device_write(scom_dev->fsi_dev, SCOM_CMD_REG, &data,
sizeof(uint32_t));
if (rc)
return rc;
rc = fsi_device_read(scom_dev->fsi_dev, SCOM_STATUS_REG, &raw_status,
sizeof(uint32_t));
if (rc)
return rc;
/*
* Read the data registers even on error, so we don't have
* to interpret the status register here.
*/
rc = fsi_device_read(scom_dev->fsi_dev, SCOM_DATA0_REG, &data,
sizeof(uint32_t));
if (rc)
return rc;
*value |= (uint64_t)be32_to_cpu(data) << 32;
rc = fsi_device_read(scom_dev->fsi_dev, SCOM_DATA1_REG, &data,
sizeof(uint32_t));
if (rc)
return rc;
*value |= be32_to_cpu(data);
*status = be32_to_cpu(raw_status);
return rc;
}
static int put_indirect_scom_form0(struct scom_device *scom, uint64_t value,
uint64_t addr, uint32_t *status)
{
uint64_t ind_data, ind_addr;
int rc, err;
if (value & ~XSCOM_DATA_IND_DATA)
return -EINVAL;
ind_addr = addr & XSCOM_ADDR_DIRECT_PART;
ind_data = (addr & XSCOM_ADDR_INDIRECT_PART) | value;
rc = __put_scom(scom, ind_data, ind_addr, status);
if (rc || (*status & SCOM_STATUS_ANY_ERR))
return rc;
rc = __get_scom(scom, &ind_data, addr, status);
if (rc || (*status & SCOM_STATUS_ANY_ERR))
return rc;
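/* Report the indirect completion error bits back as a PIB response code. */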
err = (ind_data & XSCOM_DATA_IND_ERR_MASK) >> XSCOM_DATA_IND_ERR_SHIFT;
*status = err << SCOM_STATUS_PIB_RESP_SHIFT;
return 0;
}
static int put_indirect_scom_form1(struct scom_device *scom, uint64_t value,
uint64_t addr, uint32_t *status)
{
uint64_t ind_data, ind_addr;
if (value & ~XSCOM_DATA_IND_FORM1_DATA)
return -EINVAL;
ind_addr = addr & XSCOM_ADDR_FORM1_LOW;
ind_data = value | (addr & XSCOM_ADDR_FORM1_HI) << XSCOM_ADDR_FORM1_HI_SHIFT;
return __put_scom(scom, ind_data, ind_addr, status);
}
static int get_indirect_scom_form0(struct scom_device *scom, uint64_t *value,
uint64_t addr, uint32_t *status)
{
uint64_t ind_data, ind_addr;
int rc, err;
ind_addr = addr & XSCOM_ADDR_DIRECT_PART;
ind_data = (addr & XSCOM_ADDR_INDIRECT_PART) | XSCOM_DATA_IND_READ;
rc = __put_scom(scom, ind_data, ind_addr, status);
if (rc || (*status & SCOM_STATUS_ANY_ERR))
return rc;
rc = __get_scom(scom, &ind_data, addr, status);
if (rc || (*status & SCOM_STATUS_ANY_ERR))
return rc;
err = (ind_data & XSCOM_DATA_IND_ERR_MASK) >> XSCOM_DATA_IND_ERR_SHIFT;
*status = err << SCOM_STATUS_PIB_RESP_SHIFT;
*value = ind_data & XSCOM_DATA_IND_DATA;
return 0;
}
static int raw_put_scom(struct scom_device *scom, uint64_t value,
uint64_t addr, uint32_t *status)
{
if (addr & XSCOM_ADDR_IND_FLAG) {
if (addr & XSCOM_ADDR_INF_FORM1)
return put_indirect_scom_form1(scom, value, addr, status);
else
return put_indirect_scom_form0(scom, value, addr, status);
} else
return __put_scom(scom, value, addr, status);
}
static int raw_get_scom(struct scom_device *scom, uint64_t *value,
uint64_t addr, uint32_t *status)
{
if (addr & XSCOM_ADDR_IND_FLAG) {
if (addr & XSCOM_ADDR_INF_FORM1)
return -ENXIO;
return get_indirect_scom_form0(scom, value, addr, status);
} else
return __get_scom(scom, value, addr, status);
}
static int handle_fsi2pib_status(struct scom_device *scom, uint32_t status)
{
uint32_t dummy = -1;
if (status & SCOM_STATUS_FSI2PIB_ERROR)
fsi_device_write(scom->fsi_dev, SCOM_FSI2PIB_RESET_REG, &dummy,
sizeof(uint32_t));
if (status & SCOM_STATUS_PROTECTION)
return -EPERM;
if (status & SCOM_STATUS_PARITY)
return -EIO;
if (status & SCOM_STATUS_PIB_ABORT)
return -EBUSY;
return 0;
}
static int handle_pib_status(struct scom_device *scom, uint8_t status)
{
uint32_t dummy = -1;
if (status == SCOM_PIB_SUCCESS)
return 0;
if (status == SCOM_PIB_BLOCKED)
return -EBUSY;
/* Reset the bridge */
fsi_device_write(scom->fsi_dev, SCOM_FSI2PIB_RESET_REG, &dummy,
sizeof(uint32_t));
switch(status) {
case SCOM_PIB_OFFLINE:
return -ENODEV;
case SCOM_PIB_BAD_ADDR:
return -ENXIO;
case SCOM_PIB_TIMEOUT:
return -ETIMEDOUT;
case SCOM_PIB_PARTIAL:
case SCOM_PIB_CLK_ERR:
case SCOM_PIB_PARITY_ERR:
default:
return -EIO;
}
}
static int put_scom(struct scom_device *scom, uint64_t value,
uint64_t addr)
{
uint32_t status;
int rc;
rc = raw_put_scom(scom, value, addr, &status);
if (rc)
return rc;
rc = handle_fsi2pib_status(scom, status);
if (rc)
return rc;
return handle_pib_status(scom,
(status & SCOM_STATUS_PIB_RESP_MASK)
>> SCOM_STATUS_PIB_RESP_SHIFT);
}
static int get_scom(struct scom_device *scom, uint64_t *value,
uint64_t addr)
{
uint32_t status;
int rc;
rc = raw_get_scom(scom, value, addr, &status);
if (rc)
return rc;
rc = handle_fsi2pib_status(scom, status);
if (rc)
return rc;
return handle_pib_status(scom,
(status & SCOM_STATUS_PIB_RESP_MASK)
>> SCOM_STATUS_PIB_RESP_SHIFT);
}
static ssize_t scom_read(struct file *filep, char __user *buf, size_t len,
loff_t *offset)
{
struct scom_device *scom = filep->private_data;
struct device *dev = &scom->fsi_dev->dev;
uint64_t val;
int rc;
if (len != sizeof(uint64_t))
return -EINVAL;
mutex_lock(&scom->lock);
if (scom->dead)
rc = -ENODEV;
else
rc = get_scom(scom, &val, *offset);
mutex_unlock(&scom->lock);
if (rc) {
dev_dbg(dev, "get_scom fail:%d\n", rc);
return rc;
}
rc = copy_to_user(buf, &val, len);
if (rc)
dev_dbg(dev, "copy to user failed:%d\n", rc);
return rc ? rc : len;
}
static ssize_t scom_write(struct file *filep, const char __user *buf,
size_t len, loff_t *offset)
{
int rc;
struct scom_device *scom = filep->private_data;
struct device *dev = &scom->fsi_dev->dev;
uint64_t val;
if (len != sizeof(uint64_t))
return -EINVAL;
rc = copy_from_user(&val, buf, len);
if (rc) {
dev_dbg(dev, "copy from user failed:%d\n", rc);
return -EINVAL;
}
mutex_lock(&scom->lock);
if (scom->dead)
rc = -ENODEV;
else
rc = put_scom(scom, val, *offset);
mutex_unlock(&scom->lock);
if (rc) {
dev_dbg(dev, "put_scom failed with:%d\n", rc);
return rc;
}
return len;
}
static loff_t scom_llseek(struct file *file, loff_t offset, int whence)
{
switch (whence) {
case SEEK_CUR:
break;
case SEEK_SET:
file->f_pos = offset;
break;
default:
return -EINVAL;
}
return offset;
}
static void raw_convert_status(struct scom_access *acc, uint32_t status)
{
acc->pib_status = (status & SCOM_STATUS_PIB_RESP_MASK) >>
SCOM_STATUS_PIB_RESP_SHIFT;
acc->intf_errors = 0;
if (status & SCOM_STATUS_PROTECTION)
acc->intf_errors |= SCOM_INTF_ERR_PROTECTION;
else if (status & SCOM_STATUS_PARITY)
acc->intf_errors |= SCOM_INTF_ERR_PARITY;
else if (status & SCOM_STATUS_PIB_ABORT)
acc->intf_errors |= SCOM_INTF_ERR_ABORT;
else if (status & SCOM_STATUS_ERR_SUMMARY)
acc->intf_errors |= SCOM_INTF_ERR_UNKNOWN;
}
static int scom_raw_read(struct scom_device *scom, void __user *argp)
{
struct scom_access acc;
uint32_t status;
int rc;
if (copy_from_user(&acc, argp, sizeof(struct scom_access)))
return -EFAULT;
rc = raw_get_scom(scom, &acc.data, acc.addr, &status);
if (rc)
return rc;
raw_convert_status(&acc, status);
if (copy_to_user(argp, &acc, sizeof(struct scom_access)))
return -EFAULT;
return 0;
}
static int scom_raw_write(struct scom_device *scom, void __user *argp)
{
u64 prev_data, mask, data;
struct scom_access acc;
uint32_t status;
int rc;
if (copy_from_user(&acc, argp, sizeof(struct scom_access)))
return -EFAULT;
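/* With a mask, do a read-modify-write so only the masked bits are updated. */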
if (acc.mask) {
rc = raw_get_scom(scom, &prev_data, acc.addr, &status);
if (rc)
return rc;
if (status & SCOM_STATUS_ANY_ERR)
goto fail;
mask = acc.mask;
} else {
prev_data = mask = -1ull;
}
data = (prev_data & ~mask) | (acc.data & mask);
rc = raw_put_scom(scom, data, acc.addr, &status);
if (rc)
return rc;
fail:
raw_convert_status(&acc, status);
if (copy_to_user(argp, &acc, sizeof(struct scom_access)))
return -EFAULT;
return 0;
}
static int scom_reset(struct scom_device *scom, void __user *argp)
{
uint32_t flags, dummy = -1;
int rc = 0;
if (get_user(flags, (__u32 __user *)argp))
return -EFAULT;
if (flags & SCOM_RESET_PIB)
rc = fsi_device_write(scom->fsi_dev, SCOM_PIB_RESET_REG, &dummy,
sizeof(uint32_t));
if (!rc && (flags & (SCOM_RESET_PIB | SCOM_RESET_INTF)))
rc = fsi_device_write(scom->fsi_dev, SCOM_FSI2PIB_RESET_REG, &dummy,
sizeof(uint32_t));
return rc;
}
static int scom_check(struct scom_device *scom, void __user *argp)
{
/* Still need to find out how to get "protected" */
return put_user(SCOM_CHECK_SUPPORTED, (__u32 __user *)argp);
}
static long scom_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
struct scom_device *scom = file->private_data;
void __user *argp = (void __user *)arg;
int rc = -ENOTTY;
mutex_lock(&scom->lock);
if (scom->dead) {
mutex_unlock(&scom->lock);
return -ENODEV;
}
switch(cmd) {
case FSI_SCOM_CHECK:
rc = scom_check(scom, argp);
break;
case FSI_SCOM_READ:
rc = scom_raw_read(scom, argp);
break;
case FSI_SCOM_WRITE:
rc = scom_raw_write(scom, argp);
break;
case FSI_SCOM_RESET:
rc = scom_reset(scom, argp);
break;
}
mutex_unlock(&scom->lock);
return rc;
}
static int scom_open(struct inode *inode, struct file *file)
{
struct scom_device *scom = container_of(inode->i_cdev, struct scom_device, cdev);
file->private_data = scom;
return 0;
}
static const struct file_operations scom_fops = {
.owner = THIS_MODULE,
.open = scom_open,
.llseek = scom_llseek,
.read = scom_read,
.write = scom_write,
.unlocked_ioctl = scom_ioctl,
};
static void scom_free(struct device *dev)
{
struct scom_device *scom = container_of(dev, struct scom_device, dev);
put_device(&scom->fsi_dev->dev);
kfree(scom);
}
static int scom_probe(struct device *dev)
{
struct fsi_device *fsi_dev = to_fsi_dev(dev);
struct scom_device *scom;
int rc, didx;
scom = kzalloc(sizeof(*scom), GFP_KERNEL);
if (!scom)
return -ENOMEM;
dev_set_drvdata(dev, scom);
mutex_init(&scom->lock);
/* Grab a reference to the device (parent of our cdev), we'll drop it later */
if (!get_device(dev)) {
kfree(scom);
return -ENODEV;
}
scom->fsi_dev = fsi_dev;
/* Create chardev for userspace access */
scom->dev.type = &fsi_cdev_type;
scom->dev.parent = dev;
scom->dev.release = scom_free;
device_initialize(&scom->dev);
/* Allocate a minor in the FSI space */
rc = fsi_get_new_minor(fsi_dev, fsi_dev_scom, &scom->dev.devt, &didx);
if (rc)
goto err;
dev_set_name(&scom->dev, "scom%d", didx);
cdev_init(&scom->cdev, &scom_fops);
rc = cdev_device_add(&scom->cdev, &scom->dev);
if (rc) {
dev_err(dev, "Error %d creating char device %s\n",
rc, dev_name(&scom->dev));
goto err_free_minor;
}
return 0;
err_free_minor:
fsi_free_minor(scom->dev.devt);
err:
put_device(&scom->dev);
return rc;
}
static int scom_remove(struct device *dev)
{
struct scom_device *scom = dev_get_drvdata(dev);
mutex_lock(&scom->lock);
scom->dead = true;
mutex_unlock(&scom->lock);
cdev_device_del(&scom->cdev, &scom->dev);
fsi_free_minor(scom->dev.devt);
put_device(&scom->dev);
return 0;
}
static const struct of_device_id scom_of_ids[] = {
{ .compatible = "ibm,fsi2pib" },
{ }
};
MODULE_DEVICE_TABLE(of, scom_of_ids);
static const struct fsi_device_id scom_ids[] = {
{
.engine_type = FSI_ENGID_SCOM,
.version = FSI_VERSION_ANY,
},
{ 0 }
};
static struct fsi_driver scom_drv = {
.id_table = scom_ids,
.drv = {
.name = "scom",
.bus = &fsi_bus_type,
.of_match_table = scom_of_ids,
.probe = scom_probe,
.remove = scom_remove,
}
};
static int scom_init(void)
{
return fsi_driver_register(&scom_drv);
}
static void scom_exit(void)
{
fsi_driver_unregister(&scom_drv);
}
module_init(scom_init);
module_exit(scom_exit);
MODULE_LICENSE("GPL");
| linux-master | drivers/fsi/fsi-scom.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) IBM Corporation 2023 */
#include <linux/device.h>
#include <linux/fsi.h>
#include <linux/i2c.h>
#include <linux/module.h>
#include <linux/mod_devicetable.h>
#include <linux/mutex.h>
#include "fsi-master-i2cr.h"
#define CREATE_TRACE_POINTS
#include <trace/events/fsi_master_i2cr.h>
#define I2CR_ADDRESS_CFAM(a) ((a) >> 2)
#define I2CR_INITIAL_PARITY true
#define I2CR_STATUS_CMD 0x60002
#define I2CR_STATUS_ERR BIT_ULL(61)
#define I2CR_ERROR_CMD 0x60004
#define I2CR_LOG_CMD 0x60008
static const u8 i2cr_cfam[] = {
0xc0, 0x02, 0x0d, 0xa6,
0x80, 0x01, 0x10, 0x02,
0x80, 0x01, 0x10, 0x02,
0x80, 0x01, 0x10, 0x02,
0x80, 0x01, 0x80, 0x52,
0x80, 0x01, 0x10, 0x02,
0x80, 0x01, 0x10, 0x02,
0x80, 0x01, 0x10, 0x02,
0x80, 0x01, 0x10, 0x02,
0x80, 0x01, 0x22, 0x2d,
0x00, 0x00, 0x00, 0x00,
0xde, 0xad, 0xc0, 0xde
};
static bool i2cr_check_parity32(u32 v, bool parity)
{
u32 i;
for (i = 0; i < 32; ++i) {
if (v & (1u << i))
parity = !parity;
}
return parity;
}
static bool i2cr_check_parity64(u64 v)
{
u32 i;
bool parity = I2CR_INITIAL_PARITY;
for (i = 0; i < 64; ++i) {
if (v & (1llu << i))
parity = !parity;
}
return parity;
}
static u32 i2cr_get_command(u32 address, bool parity)
{
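/*
 * The command word is the CFAM address shifted up one bit, with bit 0 set
 * so that the command (plus any write data) has odd overall parity.
 */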
address <<= 1;
if (i2cr_check_parity32(address, parity))
address |= 1;
return address;
}
static int i2cr_transfer(struct i2c_client *client, u32 command, u64 *data)
{
struct i2c_msg msgs[2];
int ret;
msgs[0].addr = client->addr;
msgs[0].flags = 0;
msgs[0].len = sizeof(command);
msgs[0].buf = (__u8 *)&command;
msgs[1].addr = client->addr;
msgs[1].flags = I2C_M_RD;
msgs[1].len = sizeof(*data);
msgs[1].buf = (__u8 *)data;
ret = i2c_transfer(client->adapter, msgs, 2);
if (ret == 2)
return 0;
trace_i2cr_i2c_error(client, command, ret);
if (ret < 0)
return ret;
return -EIO;
}
static int i2cr_check_status(struct i2c_client *client)
{
u64 status;
int ret;
ret = i2cr_transfer(client, I2CR_STATUS_CMD, &status);
if (ret)
return ret;
if (status & I2CR_STATUS_ERR) {
u32 buf[3] = { 0, 0, 0 };
u64 error;
u64 log;
i2cr_transfer(client, I2CR_ERROR_CMD, &error);
i2cr_transfer(client, I2CR_LOG_CMD, &log);
trace_i2cr_status_error(client, status, error, log);
buf[0] = I2CR_STATUS_CMD;
i2c_master_send(client, (const char *)buf, sizeof(buf));
buf[0] = I2CR_ERROR_CMD;
i2c_master_send(client, (const char *)buf, sizeof(buf));
buf[0] = I2CR_LOG_CMD;
i2c_master_send(client, (const char *)buf, sizeof(buf));
dev_err(&client->dev, "status:%016llx error:%016llx log:%016llx\n", status, error,
log);
return -EREMOTEIO;
}
trace_i2cr_status(client, status);
return 0;
}
int fsi_master_i2cr_read(struct fsi_master_i2cr *i2cr, u32 addr, u64 *data)
{
u32 command = i2cr_get_command(addr, I2CR_INITIAL_PARITY);
int ret;
mutex_lock(&i2cr->lock);
ret = i2cr_transfer(i2cr->client, command, data);
if (ret)
goto unlock;
ret = i2cr_check_status(i2cr->client);
if (ret)
goto unlock;
trace_i2cr_read(i2cr->client, command, data);
unlock:
mutex_unlock(&i2cr->lock);
return ret;
}
EXPORT_SYMBOL_GPL(fsi_master_i2cr_read);
int fsi_master_i2cr_write(struct fsi_master_i2cr *i2cr, u32 addr, u64 data)
{
u32 buf[3] = { 0 };
int ret;
buf[0] = i2cr_get_command(addr, i2cr_check_parity64(data));
memcpy(&buf[1], &data, sizeof(data));
mutex_lock(&i2cr->lock);
ret = i2c_master_send(i2cr->client, (const char *)buf, sizeof(buf));
if (ret == sizeof(buf)) {
ret = i2cr_check_status(i2cr->client);
if (!ret)
trace_i2cr_write(i2cr->client, buf[0], data);
} else {
trace_i2cr_i2c_error(i2cr->client, buf[0], ret);
if (ret >= 0)
ret = -EIO;
}
mutex_unlock(&i2cr->lock);
return ret;
}
EXPORT_SYMBOL_GPL(fsi_master_i2cr_write);
static int i2cr_read(struct fsi_master *master, int link, uint8_t id, uint32_t addr, void *val,
size_t size)
{
struct fsi_master_i2cr *i2cr = container_of(master, struct fsi_master_i2cr, master);
u64 data;
size_t i;
int ret;
if (link || id || (addr & 0xffff0000) || !(size == 1 || size == 2 || size == 4))
return -EINVAL;
/*
* The I2CR doesn't have CFAM or FSI slave address space - only the
* engines. In order for this to work with the FSI core, we need to
* emulate at minimum the CFAM config table so that the appropriate
* engines are discovered.
*/
if (addr < 0xc00) {
if (addr > sizeof(i2cr_cfam) - 4)
addr = (addr & 0x3) + (sizeof(i2cr_cfam) - 4);
memcpy(val, &i2cr_cfam[addr], size);
return 0;
}
ret = fsi_master_i2cr_read(i2cr, I2CR_ADDRESS_CFAM(addr), &data);
if (ret)
return ret;
/*
* FSI core expects up to 4 bytes BE back, while the I2CR replies with LE
* bytes on the wire.
*/
for (i = 0; i < size; ++i)
((u8 *)val)[i] = ((u8 *)&data)[7 - i];
return 0;
}
static int i2cr_write(struct fsi_master *master, int link, uint8_t id, uint32_t addr,
const void *val, size_t size)
{
struct fsi_master_i2cr *i2cr = container_of(master, struct fsi_master_i2cr, master);
u64 data = 0;
size_t i;
if (link || id || (addr & 0xffff0000) || !(size == 1 || size == 2 || size == 4))
return -EINVAL;
/* I2CR writes to CFAM or FSI slave addresses are a successful no-op. */
if (addr < 0xc00)
return 0;
/*
* FSI core passes up to 4 bytes BE, while the I2CR expects LE bytes on
* the wire.
*/
for (i = 0; i < size; ++i)
((u8 *)&data)[7 - i] = ((u8 *)val)[i];
return fsi_master_i2cr_write(i2cr, I2CR_ADDRESS_CFAM(addr), data);
}
static void i2cr_release(struct device *dev)
{
struct fsi_master_i2cr *i2cr = to_fsi_master_i2cr(to_fsi_master(dev));
of_node_put(dev->of_node);
kfree(i2cr);
}
static int i2cr_probe(struct i2c_client *client)
{
struct fsi_master_i2cr *i2cr;
int ret;
i2cr = kzalloc(sizeof(*i2cr), GFP_KERNEL);
if (!i2cr)
return -ENOMEM;
/* Only one I2CR on any given I2C bus (fixed I2C device address) */
i2cr->master.idx = client->adapter->nr;
dev_set_name(&i2cr->master.dev, "i2cr%d", i2cr->master.idx);
i2cr->master.dev.parent = &client->dev;
i2cr->master.dev.of_node = of_node_get(dev_of_node(&client->dev));
i2cr->master.dev.release = i2cr_release;
i2cr->master.n_links = 1;
i2cr->master.read = i2cr_read;
i2cr->master.write = i2cr_write;
mutex_init(&i2cr->lock);
i2cr->client = client;
ret = fsi_master_register(&i2cr->master);
if (ret)
return ret;
i2c_set_clientdata(client, i2cr);
return 0;
}
static void i2cr_remove(struct i2c_client *client)
{
struct fsi_master_i2cr *i2cr = i2c_get_clientdata(client);
fsi_master_unregister(&i2cr->master);
}
static const struct of_device_id i2cr_ids[] = {
{ .compatible = "ibm,i2cr-fsi-master" },
{ }
};
MODULE_DEVICE_TABLE(of, i2cr_ids);
static struct i2c_driver i2cr_driver = {
.probe = i2cr_probe,
.remove = i2cr_remove,
.driver = {
.name = "fsi-master-i2cr",
.of_match_table = i2cr_ids,
},
};
module_i2c_driver(i2cr_driver);
MODULE_AUTHOR("Eddie James <[email protected]>");
MODULE_DESCRIPTION("IBM I2C Responder virtual FSI master driver");
MODULE_LICENSE("GPL");
| linux-master | drivers/fsi/fsi-master-i2cr.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* A FSI master controller, using a simple GPIO bit-banging interface
*/
#include <linux/crc4.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/fsi.h>
#include <linux/gpio/consumer.h>
#include <linux/io.h>
#include <linux/irqflags.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include "fsi-master.h"
#define FSI_GPIO_STD_DLY 1 /* Standard pin delay in nS */
#define LAST_ADDR_INVALID 0x1
struct fsi_master_gpio {
struct fsi_master master;
struct device *dev;
struct mutex cmd_lock; /* mutex for command ordering */
struct gpio_desc *gpio_clk;
struct gpio_desc *gpio_data;
struct gpio_desc *gpio_trans; /* Voltage translator */
struct gpio_desc *gpio_enable; /* FSI enable */
struct gpio_desc *gpio_mux; /* Mux control */
bool external_mode;
bool no_delays;
uint32_t last_addr;
uint8_t t_send_delay;
uint8_t t_echo_delay;
};
#define CREATE_TRACE_POINTS
#include <trace/events/fsi_master_gpio.h>
#define to_fsi_master_gpio(m) container_of(m, struct fsi_master_gpio, master)
struct fsi_gpio_msg {
uint64_t msg;
uint8_t bits;
};
static void clock_toggle(struct fsi_master_gpio *master, int count)
{
int i;
for (i = 0; i < count; i++) {
if (!master->no_delays)
ndelay(FSI_GPIO_STD_DLY);
gpiod_set_value(master->gpio_clk, 0);
if (!master->no_delays)
ndelay(FSI_GPIO_STD_DLY);
gpiod_set_value(master->gpio_clk, 1);
}
}
static int sda_clock_in(struct fsi_master_gpio *master)
{
int in;
if (!master->no_delays)
ndelay(FSI_GPIO_STD_DLY);
gpiod_set_value(master->gpio_clk, 0);
/* Dummy read to feed the synchronizers */
gpiod_get_value(master->gpio_data);
/* Actual data read */
in = gpiod_get_value(master->gpio_data);
if (!master->no_delays)
ndelay(FSI_GPIO_STD_DLY);
gpiod_set_value(master->gpio_clk, 1);
return in ? 1 : 0;
}
static void sda_out(struct fsi_master_gpio *master, int value)
{
gpiod_set_value(master->gpio_data, value);
}
static void set_sda_input(struct fsi_master_gpio *master)
{
gpiod_direction_input(master->gpio_data);
gpiod_set_value(master->gpio_trans, 0);
}
static void set_sda_output(struct fsi_master_gpio *master, int value)
{
gpiod_set_value(master->gpio_trans, 1);
gpiod_direction_output(master->gpio_data, value);
}
static void clock_zeros(struct fsi_master_gpio *master, int count)
{
trace_fsi_master_gpio_clock_zeros(master, count);
set_sda_output(master, 1);
clock_toggle(master, count);
}
static void echo_delay(struct fsi_master_gpio *master)
{
clock_zeros(master, master->t_echo_delay);
}
static void serial_in(struct fsi_master_gpio *master, struct fsi_gpio_msg *msg,
uint8_t num_bits)
{
uint8_t bit, in_bit;
set_sda_input(master);
for (bit = 0; bit < num_bits; bit++) {
in_bit = sda_clock_in(master);
msg->msg <<= 1;
msg->msg |= ~in_bit & 0x1; /* Data is active low */
}
msg->bits += num_bits;
trace_fsi_master_gpio_in(master, num_bits, msg->msg);
}
static void serial_out(struct fsi_master_gpio *master,
const struct fsi_gpio_msg *cmd)
{
uint8_t bit;
uint64_t msg = ~cmd->msg; /* Data is active low */
uint64_t sda_mask = 0x1ULL << (cmd->bits - 1);
uint64_t last_bit = ~0;
int next_bit;
trace_fsi_master_gpio_out(master, cmd->bits, cmd->msg);
if (!cmd->bits) {
dev_warn(master->dev, "trying to output 0 bits\n");
return;
}
set_sda_output(master, 0);
/* Send the start bit */
sda_out(master, 0);
clock_toggle(master, 1);
/* Send the message */
for (bit = 0; bit < cmd->bits; bit++) {
next_bit = (msg & sda_mask) >> (cmd->bits - 1);
if (last_bit ^ next_bit) {
sda_out(master, next_bit);
last_bit = next_bit;
}
clock_toggle(master, 1);
msg <<= 1;
}
}
static void msg_push_bits(struct fsi_gpio_msg *msg, uint64_t data, int bits)
{
msg->msg <<= bits;
msg->msg |= data & ((1ull << bits) - 1);
msg->bits += bits;
}
static void msg_push_crc(struct fsi_gpio_msg *msg)
{
uint8_t crc;
int top;
top = msg->bits & 0x3;
/* start bit, and any non-aligned top bits */
crc = crc4(0, 1 << top | msg->msg >> (msg->bits - top), top + 1);
/* aligned bits */
crc = crc4(crc, msg->msg, msg->bits - top);
msg_push_bits(msg, crc, 4);
}
static bool check_same_address(struct fsi_master_gpio *master, int id,
uint32_t addr)
{
/* this will also handle LAST_ADDR_INVALID */
return master->last_addr == (((id & 0x3) << 21) | (addr & ~0x3));
}
static bool check_relative_address(struct fsi_master_gpio *master, int id,
uint32_t addr, uint32_t *rel_addrp)
{
uint32_t last_addr = master->last_addr;
int32_t rel_addr;
if (last_addr == LAST_ADDR_INVALID)
return false;
/* We may be in 23-bit addressing mode, which uses the id as the
* top two address bits. So, if we're referencing a different ID,
* use absolute addresses.
*/
if (((last_addr >> 21) & 0x3) != id)
return false;
/* remove the top two bits from any 23-bit addressing */
last_addr &= (1 << 21) - 1;
/* We know that the addresses are limited to 21 bits, so this won't
* overflow the signed rel_addr */
rel_addr = addr - last_addr;
if (rel_addr > 255 || rel_addr < -256)
return false;
*rel_addrp = (uint32_t)rel_addr;
return true;
}
static void last_address_update(struct fsi_master_gpio *master,
int id, bool valid, uint32_t addr)
{
if (!valid)
master->last_addr = LAST_ADDR_INVALID;
else
master->last_addr = ((id & 0x3) << 21) | (addr & ~0x3);
}
/*
* Encode an Absolute/Relative/Same Address command
*/
static void build_ar_command(struct fsi_master_gpio *master,
struct fsi_gpio_msg *cmd, uint8_t id,
uint32_t addr, size_t size, const void *data)
{
int i, addr_bits, opcode_bits;
bool write = !!data;
uint8_t ds, opcode;
uint32_t rel_addr;
cmd->bits = 0;
cmd->msg = 0;
/* we have 21 bits of address max */
addr &= ((1 << 21) - 1);
/* cmd opcodes are variable length - SAME_AR is only two bits */
opcode_bits = 3;
if (check_same_address(master, id, addr)) {
/* we still address the byte offset within the word */
addr_bits = 2;
opcode_bits = 2;
opcode = FSI_CMD_SAME_AR;
trace_fsi_master_gpio_cmd_same_addr(master);
} else if (check_relative_address(master, id, addr, &rel_addr)) {
/* 8 bits plus sign */
addr_bits = 9;
addr = rel_addr;
opcode = FSI_CMD_REL_AR;
trace_fsi_master_gpio_cmd_rel_addr(master, rel_addr);
} else {
addr_bits = 21;
opcode = FSI_CMD_ABS_AR;
trace_fsi_master_gpio_cmd_abs_addr(master, addr);
}
/*
* The read/write size is encoded in the lower bits of the address
* (as it must be naturally-aligned), and the following ds bit.
*
* size addr:1 addr:0 ds
* 1 x x 0
* 2 x 0 1
* 4 0 1 1
*
*/
ds = size > 1 ? 1 : 0;
addr &= ~(size - 1);
if (size == 4)
addr |= 1;
msg_push_bits(cmd, id, 2);
msg_push_bits(cmd, opcode, opcode_bits);
msg_push_bits(cmd, write ? 0 : 1, 1);
msg_push_bits(cmd, addr, addr_bits);
msg_push_bits(cmd, ds, 1);
for (i = 0; write && i < size; i++)
msg_push_bits(cmd, ((uint8_t *)data)[i], 8);
msg_push_crc(cmd);
}
static void build_dpoll_command(struct fsi_gpio_msg *cmd, uint8_t slave_id)
{
cmd->bits = 0;
cmd->msg = 0;
msg_push_bits(cmd, slave_id, 2);
msg_push_bits(cmd, FSI_CMD_DPOLL, 3);
msg_push_crc(cmd);
}
static void build_epoll_command(struct fsi_gpio_msg *cmd, uint8_t slave_id)
{
cmd->bits = 0;
cmd->msg = 0;
msg_push_bits(cmd, slave_id, 2);
msg_push_bits(cmd, FSI_CMD_EPOLL, 3);
msg_push_crc(cmd);
}
static void build_term_command(struct fsi_gpio_msg *cmd, uint8_t slave_id)
{
cmd->bits = 0;
cmd->msg = 0;
msg_push_bits(cmd, slave_id, 2);
msg_push_bits(cmd, FSI_CMD_TERM, 6);
msg_push_crc(cmd);
}
/*
* Note: callers rely specifically on this returning -EAGAIN for
* a CRC error detected in the response. Use other error codes
* for other situations. It will be converted to something else
* higher up the stack before it reaches userspace.
*/
static int read_one_response(struct fsi_master_gpio *master,
uint8_t data_size, struct fsi_gpio_msg *msgp, uint8_t *tagp)
{
struct fsi_gpio_msg msg;
unsigned long flags;
uint32_t crc;
uint8_t tag;
int i;
local_irq_save(flags);
/* wait for the start bit */
for (i = 0; i < FSI_MASTER_MTOE_COUNT; i++) {
msg.bits = 0;
msg.msg = 0;
serial_in(master, &msg, 1);
if (msg.msg)
break;
}
if (i == FSI_MASTER_MTOE_COUNT) {
dev_dbg(master->dev,
"Master time out waiting for response\n");
local_irq_restore(flags);
return -ETIMEDOUT;
}
msg.bits = 0;
msg.msg = 0;
/* Read slave ID & response tag */
serial_in(master, &msg, 4);
tag = msg.msg & 0x3;
/* If we have an ACK and we're expecting data, clock the data in too */
if (tag == FSI_RESP_ACK && data_size)
serial_in(master, &msg, data_size * 8);
/* read CRC */
serial_in(master, &msg, FSI_CRC_SIZE);
local_irq_restore(flags);
/* we have a whole message now; check CRC */
crc = crc4(0, 1, 1);
crc = crc4(crc, msg.msg, msg.bits);
if (crc) {
/* Check if it's all 1's, that probably means the host is off */
if (((~msg.msg) & ((1ull << msg.bits) - 1)) == 0)
return -ENODEV;
dev_dbg(master->dev, "ERR response CRC msg: 0x%016llx (%d bits)\n",
msg.msg, msg.bits);
return -EAGAIN;
}
if (msgp)
*msgp = msg;
if (tagp)
*tagp = tag;
return 0;
}
static int issue_term(struct fsi_master_gpio *master, uint8_t slave)
{
struct fsi_gpio_msg cmd;
unsigned long flags;
uint8_t tag;
int rc;
build_term_command(&cmd, slave);
local_irq_save(flags);
serial_out(master, &cmd);
echo_delay(master);
local_irq_restore(flags);
rc = read_one_response(master, 0, NULL, &tag);
if (rc < 0) {
dev_err(master->dev,
"TERM failed; lost communication with slave\n");
return -EIO;
} else if (tag != FSI_RESP_ACK) {
dev_err(master->dev, "TERM failed; response %d\n", tag);
return -EIO;
}
return 0;
}
static int poll_for_response(struct fsi_master_gpio *master,
uint8_t slave, uint8_t size, void *data)
{
struct fsi_gpio_msg response, cmd;
int busy_count = 0, rc, i;
unsigned long flags;
uint8_t tag;
uint8_t *data_byte = data;
int crc_err_retries = 0;
retry:
rc = read_one_response(master, size, &response, &tag);
/* Handle retries on CRC errors */
if (rc == -EAGAIN) {
/* Too many retries ? */
if (crc_err_retries++ > FSI_CRC_ERR_RETRIES) {
/*
* Pass it up as -EIO, otherwise the upper level will retry
* the whole command, which isn't what we want here.
*/
rc = -EIO;
goto fail;
}
dev_dbg(master->dev,
"CRC error retry %d\n", crc_err_retries);
trace_fsi_master_gpio_crc_rsp_error(master);
build_epoll_command(&cmd, slave);
local_irq_save(flags);
clock_zeros(master, FSI_MASTER_EPOLL_CLOCKS);
serial_out(master, &cmd);
echo_delay(master);
local_irq_restore(flags);
goto retry;
} else if (rc)
goto fail;
switch (tag) {
case FSI_RESP_ACK:
if (size && data) {
uint64_t val = response.msg;
/* clear crc & mask */
val >>= 4;
val &= (1ull << (size * 8)) - 1;
for (i = 0; i < size; i++) {
data_byte[size-i-1] = val;
val >>= 8;
}
}
break;
case FSI_RESP_BUSY:
/*
* It's necessary to clock the slave before issuing a
* d-poll; this is not indicated in the hardware protocol
* spec. Fewer than 20 clocks causes the slave to hang, 21 is OK.
*/
if (busy_count++ < FSI_MASTER_MAX_BUSY) {
build_dpoll_command(&cmd, slave);
local_irq_save(flags);
clock_zeros(master, FSI_MASTER_DPOLL_CLOCKS);
serial_out(master, &cmd);
echo_delay(master);
local_irq_restore(flags);
goto retry;
}
dev_warn(master->dev,
"ERR slave is stuck in busy state, issuing TERM\n");
local_irq_save(flags);
clock_zeros(master, FSI_MASTER_DPOLL_CLOCKS);
local_irq_restore(flags);
issue_term(master, slave);
rc = -EIO;
break;
case FSI_RESP_ERRA:
dev_dbg(master->dev, "ERRA received: 0x%x\n", (int)response.msg);
rc = -EIO;
break;
case FSI_RESP_ERRC:
dev_dbg(master->dev, "ERRC received: 0x%x\n", (int)response.msg);
trace_fsi_master_gpio_crc_cmd_error(master);
rc = -EAGAIN;
break;
}
if (busy_count > 0)
trace_fsi_master_gpio_poll_response_busy(master, busy_count);
fail:
/*
* tSendDelay clocks, avoids signal reflections when switching
* from receive of response back to send of data.
*/
local_irq_save(flags);
clock_zeros(master, master->t_send_delay);
local_irq_restore(flags);
return rc;
}
static int send_request(struct fsi_master_gpio *master,
struct fsi_gpio_msg *cmd)
{
unsigned long flags;
if (master->external_mode)
return -EBUSY;
local_irq_save(flags);
serial_out(master, cmd);
echo_delay(master);
local_irq_restore(flags);
return 0;
}
static int fsi_master_gpio_xfer(struct fsi_master_gpio *master, uint8_t slave,
struct fsi_gpio_msg *cmd, size_t resp_len, void *resp)
{
int rc = -EAGAIN, retries = 0;
while ((retries++) < FSI_CRC_ERR_RETRIES) {
rc = send_request(master, cmd);
if (rc)
break;
rc = poll_for_response(master, slave, resp_len, resp);
if (rc != -EAGAIN)
break;
rc = -EIO;
dev_warn(master->dev, "ECRC retry %d\n", retries);
/* Pace it a bit before retry */
msleep(1);
}
return rc;
}
static int fsi_master_gpio_read(struct fsi_master *_master, int link,
uint8_t id, uint32_t addr, void *val, size_t size)
{
struct fsi_master_gpio *master = to_fsi_master_gpio(_master);
struct fsi_gpio_msg cmd;
int rc;
if (link != 0)
return -ENODEV;
mutex_lock(&master->cmd_lock);
build_ar_command(master, &cmd, id, addr, size, NULL);
rc = fsi_master_gpio_xfer(master, id, &cmd, size, val);
last_address_update(master, id, rc == 0, addr);
mutex_unlock(&master->cmd_lock);
return rc;
}
static int fsi_master_gpio_write(struct fsi_master *_master, int link,
uint8_t id, uint32_t addr, const void *val, size_t size)
{
struct fsi_master_gpio *master = to_fsi_master_gpio(_master);
struct fsi_gpio_msg cmd;
int rc;
if (link != 0)
return -ENODEV;
mutex_lock(&master->cmd_lock);
build_ar_command(master, &cmd, id, addr, size, val);
rc = fsi_master_gpio_xfer(master, id, &cmd, 0, NULL);
last_address_update(master, id, rc == 0, addr);
mutex_unlock(&master->cmd_lock);
return rc;
}
static int fsi_master_gpio_term(struct fsi_master *_master,
int link, uint8_t id)
{
struct fsi_master_gpio *master = to_fsi_master_gpio(_master);
struct fsi_gpio_msg cmd;
int rc;
if (link != 0)
return -ENODEV;
mutex_lock(&master->cmd_lock);
build_term_command(&cmd, id);
rc = fsi_master_gpio_xfer(master, id, &cmd, 0, NULL);
last_address_update(master, id, false, 0);
mutex_unlock(&master->cmd_lock);
return rc;
}
static int fsi_master_gpio_break(struct fsi_master *_master, int link)
{
struct fsi_master_gpio *master = to_fsi_master_gpio(_master);
unsigned long flags;
if (link != 0)
return -ENODEV;
trace_fsi_master_gpio_break(master);
mutex_lock(&master->cmd_lock);
if (master->external_mode) {
mutex_unlock(&master->cmd_lock);
return -EBUSY;
}
local_irq_save(flags);
set_sda_output(master, 1);
sda_out(master, 1);
clock_toggle(master, FSI_PRE_BREAK_CLOCKS);
sda_out(master, 0);
clock_toggle(master, FSI_BREAK_CLOCKS);
echo_delay(master);
sda_out(master, 1);
clock_toggle(master, FSI_POST_BREAK_CLOCKS);
local_irq_restore(flags);
last_address_update(master, 0, false, 0);
mutex_unlock(&master->cmd_lock);
/* Wait for logic reset to take effect */
udelay(200);
return 0;
}
static void fsi_master_gpio_init(struct fsi_master_gpio *master)
{
unsigned long flags;
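/*
 * Take the bus in locally-driven mode: route the FSI signals to this
 * master via the mux, enable the translator and FSI enable lines, and
 * idle with clock and data driven high.
 */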
gpiod_direction_output(master->gpio_mux, 1);
gpiod_direction_output(master->gpio_trans, 1);
gpiod_direction_output(master->gpio_enable, 1);
gpiod_direction_output(master->gpio_clk, 1);
gpiod_direction_output(master->gpio_data, 1);
/* todo: evaluate if clocks can be reduced */
local_irq_save(flags);
clock_zeros(master, FSI_INIT_CLOCKS);
local_irq_restore(flags);
}
static void fsi_master_gpio_init_external(struct fsi_master_gpio *master)
{
gpiod_direction_output(master->gpio_mux, 0);
gpiod_direction_output(master->gpio_trans, 0);
gpiod_direction_output(master->gpio_enable, 1);
gpiod_direction_input(master->gpio_clk);
gpiod_direction_input(master->gpio_data);
}
static int fsi_master_gpio_link_enable(struct fsi_master *_master, int link,
bool enable)
{
struct fsi_master_gpio *master = to_fsi_master_gpio(_master);
int rc = -EBUSY;
if (link != 0)
return -ENODEV;
mutex_lock(&master->cmd_lock);
if (!master->external_mode) {
gpiod_set_value(master->gpio_enable, enable ? 1 : 0);
rc = 0;
}
mutex_unlock(&master->cmd_lock);
return rc;
}
static int fsi_master_gpio_link_config(struct fsi_master *_master, int link,
u8 t_send_delay, u8 t_echo_delay)
{
struct fsi_master_gpio *master = to_fsi_master_gpio(_master);
if (link != 0)
return -ENODEV;
mutex_lock(&master->cmd_lock);
master->t_send_delay = t_send_delay;
master->t_echo_delay = t_echo_delay;
mutex_unlock(&master->cmd_lock);
return 0;
}
static ssize_t external_mode_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct fsi_master_gpio *master = dev_get_drvdata(dev);
return snprintf(buf, PAGE_SIZE - 1, "%u\n",
master->external_mode ? 1 : 0);
}
static ssize_t external_mode_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
struct fsi_master_gpio *master = dev_get_drvdata(dev);
unsigned long val;
bool external_mode;
int err;
err = kstrtoul(buf, 0, &val);
if (err)
return err;
external_mode = !!val;
mutex_lock(&master->cmd_lock);
if (external_mode == master->external_mode) {
mutex_unlock(&master->cmd_lock);
return count;
}
master->external_mode = external_mode;
if (master->external_mode)
fsi_master_gpio_init_external(master);
else
fsi_master_gpio_init(master);
mutex_unlock(&master->cmd_lock);
fsi_master_rescan(&master->master);
return count;
}
static DEVICE_ATTR(external_mode, 0664,
external_mode_show, external_mode_store);
static void fsi_master_gpio_release(struct device *dev)
{
struct fsi_master_gpio *master = to_fsi_master_gpio(to_fsi_master(dev));
of_node_put(dev_of_node(master->dev));
kfree(master);
}
static int fsi_master_gpio_probe(struct platform_device *pdev)
{
struct fsi_master_gpio *master;
struct gpio_desc *gpio;
int rc;
master = kzalloc(sizeof(*master), GFP_KERNEL);
if (!master)
return -ENOMEM;
master->dev = &pdev->dev;
master->master.dev.parent = master->dev;
master->master.dev.of_node = of_node_get(dev_of_node(master->dev));
master->master.dev.release = fsi_master_gpio_release;
master->last_addr = LAST_ADDR_INVALID;
gpio = devm_gpiod_get(&pdev->dev, "clock", 0);
if (IS_ERR(gpio)) {
dev_err(&pdev->dev, "failed to get clock gpio\n");
rc = PTR_ERR(gpio);
goto err_free;
}
master->gpio_clk = gpio;
gpio = devm_gpiod_get(&pdev->dev, "data", 0);
if (IS_ERR(gpio)) {
dev_err(&pdev->dev, "failed to get data gpio\n");
rc = PTR_ERR(gpio);
goto err_free;
}
master->gpio_data = gpio;
/* Optional GPIOs */
gpio = devm_gpiod_get_optional(&pdev->dev, "trans", 0);
if (IS_ERR(gpio)) {
dev_err(&pdev->dev, "failed to get trans gpio\n");
rc = PTR_ERR(gpio);
goto err_free;
}
master->gpio_trans = gpio;
gpio = devm_gpiod_get_optional(&pdev->dev, "enable", 0);
if (IS_ERR(gpio)) {
dev_err(&pdev->dev, "failed to get enable gpio\n");
rc = PTR_ERR(gpio);
goto err_free;
}
master->gpio_enable = gpio;
gpio = devm_gpiod_get_optional(&pdev->dev, "mux", 0);
if (IS_ERR(gpio)) {
dev_err(&pdev->dev, "failed to get mux gpio\n");
rc = PTR_ERR(gpio);
goto err_free;
}
master->gpio_mux = gpio;
/*
* Check if the GPIO block is slow enough that no extra delays
* are necessary. This improves performance on ast2500 by
* an order of magnitude.
*/
master->no_delays = device_property_present(&pdev->dev, "no-gpio-delays");
/* Default FSI command delays */
master->t_send_delay = FSI_SEND_DELAY_CLOCKS;
master->t_echo_delay = FSI_ECHO_DELAY_CLOCKS;
master->master.n_links = 1;
master->master.flags = FSI_MASTER_FLAG_SWCLOCK;
master->master.read = fsi_master_gpio_read;
master->master.write = fsi_master_gpio_write;
master->master.term = fsi_master_gpio_term;
master->master.send_break = fsi_master_gpio_break;
master->master.link_enable = fsi_master_gpio_link_enable;
master->master.link_config = fsi_master_gpio_link_config;
platform_set_drvdata(pdev, master);
mutex_init(&master->cmd_lock);
fsi_master_gpio_init(master);
rc = device_create_file(&pdev->dev, &dev_attr_external_mode);
if (rc)
goto err_free;
rc = fsi_master_register(&master->master);
if (rc) {
device_remove_file(&pdev->dev, &dev_attr_external_mode);
put_device(&master->master.dev);
return rc;
}
return 0;
err_free:
kfree(master);
return rc;
}
static int fsi_master_gpio_remove(struct platform_device *pdev)
{
struct fsi_master_gpio *master = platform_get_drvdata(pdev);
device_remove_file(&pdev->dev, &dev_attr_external_mode);
fsi_master_unregister(&master->master);
return 0;
}
static const struct of_device_id fsi_master_gpio_match[] = {
{ .compatible = "fsi-master-gpio" },
{ },
};
MODULE_DEVICE_TABLE(of, fsi_master_gpio_match);
static struct platform_driver fsi_master_gpio_driver = {
.driver = {
.name = "fsi-master-gpio",
.of_match_table = fsi_master_gpio_match,
},
.probe = fsi_master_gpio_probe,
.remove = fsi_master_gpio_remove,
};
module_platform_driver(fsi_master_gpio_driver);
MODULE_LICENSE("GPL");
| linux-master | drivers/fsi/fsi-master-gpio.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) IBM Corporation 2023 */
#include <linux/cdev.h>
#include <linux/device.h>
#include <linux/fs.h>
#include <linux/fsi.h>
#include <linux/module.h>
#include <linux/mod_devicetable.h>
#include "fsi-master-i2cr.h"
#include "fsi-slave.h"
struct i2cr_scom {
struct device dev;
struct cdev cdev;
struct fsi_master_i2cr *i2cr;
};
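/*
 * The file offset selects the SCOM address: SEEK_SET stores it, while
 * SEEK_CUR is accepted but leaves the position unchanged.
 */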
static loff_t i2cr_scom_llseek(struct file *file, loff_t offset, int whence)
{
switch (whence) {
case SEEK_CUR:
break;
case SEEK_SET:
file->f_pos = offset;
break;
default:
return -EINVAL;
}
return offset;
}
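/* Read a single 8-byte SCOM word from the address held in the file offset. */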
static ssize_t i2cr_scom_read(struct file *filep, char __user *buf, size_t len, loff_t *offset)
{
struct i2cr_scom *scom = filep->private_data;
u64 data;
int ret;
if (len != sizeof(data))
return -EINVAL;
ret = fsi_master_i2cr_read(scom->i2cr, (u32)*offset, &data);
if (ret)
return ret;
ret = copy_to_user(buf, &data, len);
if (ret)
return ret;
return len;
}
static ssize_t i2cr_scom_write(struct file *filep, const char __user *buf, size_t len,
loff_t *offset)
{
struct i2cr_scom *scom = filep->private_data;
u64 data;
int ret;
if (len != sizeof(data))
return -EINVAL;
ret = copy_from_user(&data, buf, len);
if (ret)
return ret;
ret = fsi_master_i2cr_write(scom->i2cr, (u32)*offset, data);
if (ret)
return ret;
return len;
}
static const struct file_operations i2cr_scom_fops = {
.owner = THIS_MODULE,
.open = simple_open,
.llseek = i2cr_scom_llseek,
.read = i2cr_scom_read,
.write = i2cr_scom_write,
};
static int i2cr_scom_probe(struct device *dev)
{
struct fsi_device *fsi_dev = to_fsi_dev(dev);
struct i2cr_scom *scom;
int didx;
int ret;
if (!is_fsi_master_i2cr(fsi_dev->slave->master))
return -ENODEV;
scom = devm_kzalloc(dev, sizeof(*scom), GFP_KERNEL);
if (!scom)
return -ENOMEM;
scom->i2cr = to_fsi_master_i2cr(fsi_dev->slave->master);
dev_set_drvdata(dev, scom);
scom->dev.type = &fsi_cdev_type;
scom->dev.parent = dev;
device_initialize(&scom->dev);
ret = fsi_get_new_minor(fsi_dev, fsi_dev_scom, &scom->dev.devt, &didx);
if (ret)
return ret;
dev_set_name(&scom->dev, "scom%d", didx);
cdev_init(&scom->cdev, &i2cr_scom_fops);
ret = cdev_device_add(&scom->cdev, &scom->dev);
if (ret)
fsi_free_minor(scom->dev.devt);
return ret;
}
static int i2cr_scom_remove(struct device *dev)
{
struct i2cr_scom *scom = dev_get_drvdata(dev);
cdev_device_del(&scom->cdev, &scom->dev);
fsi_free_minor(scom->dev.devt);
return 0;
}
static const struct of_device_id i2cr_scom_of_ids[] = {
{ .compatible = "ibm,i2cr-scom" },
{ }
};
MODULE_DEVICE_TABLE(of, i2cr_scom_of_ids);
static const struct fsi_device_id i2cr_scom_ids[] = {
{ 0x5, FSI_VERSION_ANY },
{ }
};
static struct fsi_driver i2cr_scom_driver = {
.id_table = i2cr_scom_ids,
.drv = {
.name = "i2cr_scom",
.bus = &fsi_bus_type,
.of_match_table = i2cr_scom_of_ids,
.probe = i2cr_scom_probe,
.remove = i2cr_scom_remove,
}
};
module_fsi_driver(i2cr_scom_driver);
MODULE_AUTHOR("Eddie James <[email protected]>");
MODULE_DESCRIPTION("IBM I2C Responder SCOM driver");
MODULE_LICENSE("GPL");
| linux-master | drivers/fsi/i2cr-scom.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) IBM Corporation 2017
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/fsi.h>
#include <linux/fsi-sbefifo.h>
#include <linux/kernel.h>
#include <linux/cdev.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/delay.h>
#include <linux/uio.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <uapi/linux/fsi.h>
/*
* The SBEFIFO is a pipe-like FSI device for communicating with
* the self boot engine on POWER processors.
*/
#define DEVICE_NAME "sbefifo"
#define FSI_ENGID_SBE 0x22
/*
* Register layout
*/
/* Register banks */
#define SBEFIFO_UP 0x00 /* FSI -> Host */
#define SBEFIFO_DOWN 0x40 /* Host -> FSI */
/* Per-bank registers */
#define SBEFIFO_FIFO 0x00 /* The FIFO itself */
#define SBEFIFO_STS 0x04 /* Status register */
#define SBEFIFO_STS_PARITY_ERR 0x20000000
#define SBEFIFO_STS_RESET_REQ 0x02000000
#define SBEFIFO_STS_GOT_EOT 0x00800000
#define SBEFIFO_STS_MAX_XFER_LIMIT 0x00400000
#define SBEFIFO_STS_FULL 0x00200000
#define SBEFIFO_STS_EMPTY 0x00100000
#define SBEFIFO_STS_ECNT_MASK 0x000f0000
#define SBEFIFO_STS_ECNT_SHIFT 16
#define SBEFIFO_STS_VALID_MASK 0x0000ff00
#define SBEFIFO_STS_VALID_SHIFT 8
#define SBEFIFO_STS_EOT_MASK 0x000000ff
#define SBEFIFO_STS_EOT_SHIFT 0
#define SBEFIFO_EOT_RAISE 0x08 /* (Up only) Set End Of Transfer */
#define SBEFIFO_REQ_RESET 0x0C /* (Up only) Reset Request */
#define SBEFIFO_PERFORM_RESET 0x10 /* (Down only) Perform Reset */
#define SBEFIFO_EOT_ACK 0x14 /* (Down only) Acknowledge EOT */
#define SBEFIFO_DOWN_MAX 0x18 /* (Down only) Max transfer */
/* CFAM GP Mailbox SelfBoot Message register */
#define CFAM_GP_MBOX_SBM_ADDR 0x2824 /* Converted 0x2809 */
#define CFAM_SBM_SBE_BOOTED 0x80000000
#define CFAM_SBM_SBE_ASYNC_FFDC 0x40000000
#define CFAM_SBM_SBE_STATE_MASK 0x00f00000
#define CFAM_SBM_SBE_STATE_SHIFT 20
enum sbe_state
{
SBE_STATE_UNKNOWN = 0x0, // Unknown, initial state
SBE_STATE_IPLING = 0x1, // IPL'ing - autonomous mode (transient)
SBE_STATE_ISTEP = 0x2, // ISTEP - Running IPL by steps (transient)
SBE_STATE_MPIPL = 0x3, // MPIPL
SBE_STATE_RUNTIME = 0x4, // SBE Runtime
SBE_STATE_DMT = 0x5, // Dead Man Timer State (transient)
SBE_STATE_DUMP = 0x6, // Dumping
SBE_STATE_FAILURE = 0x7, // Internal SBE failure
SBE_STATE_QUIESCE = 0x8, // Final state - needs SBE reset to get out
};
/* FIFO depth */
#define SBEFIFO_FIFO_DEPTH 8
/* Helpers */
#define sbefifo_empty(sts) ((sts) & SBEFIFO_STS_EMPTY)
#define sbefifo_full(sts) ((sts) & SBEFIFO_STS_FULL)
#define sbefifo_parity_err(sts) ((sts) & SBEFIFO_STS_PARITY_ERR)
#define sbefifo_populated(sts) (((sts) & SBEFIFO_STS_ECNT_MASK) >> SBEFIFO_STS_ECNT_SHIFT)
#define sbefifo_vacant(sts) (SBEFIFO_FIFO_DEPTH - sbefifo_populated(sts))
#define sbefifo_eot_set(sts) (((sts) & SBEFIFO_STS_EOT_MASK) >> SBEFIFO_STS_EOT_SHIFT)
/* Reset request timeout in ms */
#define SBEFIFO_RESET_TIMEOUT 10000
/* Timeouts for commands in ms */
#define SBEFIFO_TIMEOUT_START_CMD 10000
#define SBEFIFO_TIMEOUT_IN_CMD 1000
#define SBEFIFO_TIMEOUT_START_RSP 10000
#define SBEFIFO_TIMEOUT_IN_RSP 1000
/* Other constants */
#define SBEFIFO_MAX_USER_CMD_LEN (0x100000 + PAGE_SIZE)
#define SBEFIFO_RESET_MAGIC 0x52534554 /* "RSET" */
struct sbefifo {
uint32_t magic;
#define SBEFIFO_MAGIC 0x53424546 /* "SBEF" */
struct fsi_device *fsi_dev;
struct device dev;
struct cdev cdev;
struct mutex lock;
bool broken;
bool dead;
bool async_ffdc;
bool timed_out;
u32 timeout_in_cmd_ms;
u32 timeout_start_rsp_ms;
};
struct sbefifo_user {
struct sbefifo *sbefifo;
struct mutex file_lock;
void *cmd_page;
void *pending_cmd;
size_t pending_len;
u32 cmd_timeout_ms;
u32 read_timeout_ms;
};
static DEFINE_MUTEX(sbefifo_ffdc_mutex);
static ssize_t timeout_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct sbefifo *sbefifo = container_of(dev, struct sbefifo, dev);
return sysfs_emit(buf, "%d\n", sbefifo->timed_out ? 1 : 0);
}
static DEVICE_ATTR_RO(timeout);
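/*
 * Each FFDC package begins with three words: word 0 holds 0xFFDC in its top
 * halfword and the number of following data words in its bottom halfword,
 * word 1 holds the originating command and word 2 the response code.
 */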
static void __sbefifo_dump_ffdc(struct device *dev, const __be32 *ffdc,
size_t ffdc_sz, bool internal)
{
int pack = 0;
#define FFDC_LSIZE 60
static char ffdc_line[FFDC_LSIZE];
char *p = ffdc_line;
while (ffdc_sz) {
u32 w0, w1, w2, i;
if (ffdc_sz < 3) {
dev_err(dev, "SBE invalid FFDC package size %zd\n", ffdc_sz);
return;
}
w0 = be32_to_cpu(*(ffdc++));
w1 = be32_to_cpu(*(ffdc++));
w2 = be32_to_cpu(*(ffdc++));
ffdc_sz -= 3;
if ((w0 >> 16) != 0xFFDC) {
dev_err(dev, "SBE invalid FFDC package signature %08x %08x %08x\n",
w0, w1, w2);
break;
}
w0 &= 0xffff;
if (w0 > ffdc_sz) {
dev_err(dev, "SBE FFDC package len %d words but only %zd remaining\n",
w0, ffdc_sz);
w0 = ffdc_sz;
break;
}
if (internal) {
dev_warn(dev, "+---- SBE FFDC package %d for async err -----+\n",
pack++);
} else {
dev_warn(dev, "+---- SBE FFDC package %d for cmd %02x:%02x -----+\n",
pack++, (w1 >> 8) & 0xff, w1 & 0xff);
}
dev_warn(dev, "| Response code: %08x |\n", w2);
dev_warn(dev, "|-------------------------------------------|\n");
for (i = 0; i < w0; i++) {
if ((i & 3) == 0) {
p = ffdc_line;
p += sprintf(p, "| %04x:", i << 4);
}
p += sprintf(p, " %08x", be32_to_cpu(*(ffdc++)));
ffdc_sz--;
if ((i & 3) == 3 || i == (w0 - 1)) {
while ((i & 3) < 3) {
p += sprintf(p, " ");
i++;
}
dev_warn(dev, "%s |\n", ffdc_line);
}
}
dev_warn(dev, "+-------------------------------------------+\n");
}
}
static void sbefifo_dump_ffdc(struct device *dev, const __be32 *ffdc,
size_t ffdc_sz, bool internal)
{
mutex_lock(&sbefifo_ffdc_mutex);
__sbefifo_dump_ffdc(dev, ffdc, ffdc_sz, internal);
mutex_unlock(&sbefifo_ffdc_mutex);
}
int sbefifo_parse_status(struct device *dev, u16 cmd, __be32 *response,
size_t resp_len, size_t *data_len)
{
u32 dh, s0, s1;
size_t ffdc_sz;
if (resp_len < 3) {
pr_debug("sbefifo: cmd %04x, response too small: %zd\n",
cmd, resp_len);
return -ENXIO;
}
dh = be32_to_cpu(response[resp_len - 1]);
if (dh > resp_len || dh < 3) {
dev_err(dev, "SBE cmd %02x:%02x status offset out of range: %d/%zd\n",
cmd >> 8, cmd & 0xff, dh, resp_len);
return -ENXIO;
}
s0 = be32_to_cpu(response[resp_len - dh]);
s1 = be32_to_cpu(response[resp_len - dh + 1]);
if (((s0 >> 16) != 0xC0DE) || ((s0 & 0xffff) != cmd)) {
dev_err(dev, "SBE cmd %02x:%02x, status signature invalid: 0x%08x 0x%08x\n",
cmd >> 8, cmd & 0xff, s0, s1);
return -ENXIO;
}
if (s1 != 0) {
ffdc_sz = dh - 3;
dev_warn(dev, "SBE error cmd %02x:%02x status=%04x:%04x\n",
cmd >> 8, cmd & 0xff, s1 >> 16, s1 & 0xffff);
if (ffdc_sz)
sbefifo_dump_ffdc(dev, &response[resp_len - dh + 2],
ffdc_sz, false);
}
if (data_len)
*data_len = resp_len - dh;
/*
* The primary status doesn't have the top bit set, so it can't be confused
* with Linux negative error codes; return the status word whole.
*/
return s1;
}
EXPORT_SYMBOL_GPL(sbefifo_parse_status);
static int sbefifo_regr(struct sbefifo *sbefifo, int reg, u32 *word)
{
__be32 raw_word;
int rc;
rc = fsi_device_read(sbefifo->fsi_dev, reg, &raw_word,
sizeof(raw_word));
if (rc)
return rc;
*word = be32_to_cpu(raw_word);
return 0;
}
static int sbefifo_regw(struct sbefifo *sbefifo, int reg, u32 word)
{
__be32 raw_word = cpu_to_be32(word);
return fsi_device_write(sbefifo->fsi_dev, reg, &raw_word,
sizeof(raw_word));
}
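/*
 * Read the SBE boot and state flags from the CFAM GP mailbox; reject use of
 * the FIFO when the SBE isn't usable and note any pending async FFDC.
 */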
static int sbefifo_check_sbe_state(struct sbefifo *sbefifo)
{
__be32 raw_word;
u32 sbm;
int rc;
rc = fsi_slave_read(sbefifo->fsi_dev->slave, CFAM_GP_MBOX_SBM_ADDR,
&raw_word, sizeof(raw_word));
if (rc)
return rc;
sbm = be32_to_cpu(raw_word);
/* SBE booted at all ? */
if (!(sbm & CFAM_SBM_SBE_BOOTED))
return -ESHUTDOWN;
/* Check its state */
switch ((sbm & CFAM_SBM_SBE_STATE_MASK) >> CFAM_SBM_SBE_STATE_SHIFT) {
case SBE_STATE_UNKNOWN:
return -ESHUTDOWN;
case SBE_STATE_DMT:
return -EBUSY;
case SBE_STATE_IPLING:
case SBE_STATE_ISTEP:
case SBE_STATE_MPIPL:
case SBE_STATE_RUNTIME:
case SBE_STATE_DUMP: /* Not sure about that one */
break;
case SBE_STATE_FAILURE:
case SBE_STATE_QUIESCE:
return -ESHUTDOWN;
}
/* Is there async FFDC available? If so, remember it */
if (sbm & CFAM_SBM_SBE_ASYNC_FFDC)
sbefifo->async_ffdc = true;
return 0;
}
/* Don't flip endianness of data to/from FIFO, just pass through. */
static int sbefifo_down_read(struct sbefifo *sbefifo, __be32 *word)
{
return fsi_device_read(sbefifo->fsi_dev, SBEFIFO_DOWN, word,
sizeof(*word));
}
static int sbefifo_up_write(struct sbefifo *sbefifo, __be32 word)
{
return fsi_device_write(sbefifo->fsi_dev, SBEFIFO_UP, &word,
sizeof(word));
}
static int sbefifo_request_reset(struct sbefifo *sbefifo)
{
struct device *dev = &sbefifo->fsi_dev->dev;
unsigned long end_time;
u32 status;
int rc;
dev_dbg(dev, "Requesting FIFO reset\n");
/* Mark broken first, will be cleared if reset succeeds */
sbefifo->broken = true;
/* Send reset request */
rc = sbefifo_regw(sbefifo, SBEFIFO_UP | SBEFIFO_REQ_RESET, 1);
if (rc) {
dev_err(dev, "Sending reset request failed, rc=%d\n", rc);
return rc;
}
/* Wait for it to complete */
end_time = jiffies + msecs_to_jiffies(SBEFIFO_RESET_TIMEOUT);
while (!time_after(jiffies, end_time)) {
rc = sbefifo_regr(sbefifo, SBEFIFO_UP | SBEFIFO_STS, &status);
if (rc) {
dev_err(dev, "Failed to read UP fifo status during reset"
" , rc=%d\n", rc);
return rc;
}
if (!(status & SBEFIFO_STS_RESET_REQ)) {
dev_dbg(dev, "FIFO reset done\n");
sbefifo->broken = false;
return 0;
}
cond_resched();
}
dev_err(dev, "FIFO reset timed out\n");
return -ETIMEDOUT;
}
static int sbefifo_cleanup_hw(struct sbefifo *sbefifo)
{
struct device *dev = &sbefifo->fsi_dev->dev;
u32 up_status, down_status;
bool need_reset = false;
int rc;
rc = sbefifo_check_sbe_state(sbefifo);
if (rc) {
dev_dbg(dev, "SBE state=%d\n", rc);
return rc;
}
/* If broken, we don't need to look at status, go straight to reset */
if (sbefifo->broken)
goto do_reset;
rc = sbefifo_regr(sbefifo, SBEFIFO_UP | SBEFIFO_STS, &up_status);
if (rc) {
dev_err(dev, "Cleanup: Reading UP status failed, rc=%d\n", rc);
/* Will try reset again on next attempt at using it */
sbefifo->broken = true;
return rc;
}
rc = sbefifo_regr(sbefifo, SBEFIFO_DOWN | SBEFIFO_STS, &down_status);
if (rc) {
dev_err(dev, "Cleanup: Reading DOWN status failed, rc=%d\n", rc);
/* Will try reset again on next attempt at using it */
sbefifo->broken = true;
return rc;
}
/* Does the FIFO already contain a reset request from the SBE? */
if (down_status & SBEFIFO_STS_RESET_REQ) {
dev_info(dev, "Cleanup: FIFO reset request set, resetting\n");
rc = sbefifo_regw(sbefifo, SBEFIFO_DOWN, SBEFIFO_PERFORM_RESET);
if (rc) {
sbefifo->broken = true;
dev_err(dev, "Cleanup: Reset reg write failed, rc=%d\n", rc);
return rc;
}
sbefifo->broken = false;
return 0;
}
/* Parity error on either FIFO ? */
if ((up_status | down_status) & SBEFIFO_STS_PARITY_ERR)
need_reset = true;
/* Either FIFO not empty ? */
if (!((up_status & down_status) & SBEFIFO_STS_EMPTY))
need_reset = true;
if (!need_reset)
return 0;
dev_info(dev, "Cleanup: FIFO not clean (up=0x%08x down=0x%08x)\n",
up_status, down_status);
do_reset:
/* Mark broken, will be cleared if/when reset succeeds */
return sbefifo_request_reset(sbefifo);
}
static int sbefifo_wait(struct sbefifo *sbefifo, bool up,
u32 *status, unsigned long timeout)
{
struct device *dev = &sbefifo->fsi_dev->dev;
unsigned long end_time;
bool ready = false;
u32 addr, sts = 0;
int rc;
dev_vdbg(dev, "Wait on %s fifo...\n", up ? "up" : "down");
addr = (up ? SBEFIFO_UP : SBEFIFO_DOWN) | SBEFIFO_STS;
end_time = jiffies + timeout;
while (!time_after(jiffies, end_time)) {
cond_resched();
rc = sbefifo_regr(sbefifo, addr, &sts);
if (rc < 0) {
dev_err(dev, "FSI error %d reading status register\n", rc);
return rc;
}
if (!up && sbefifo_parity_err(sts)) {
dev_err(dev, "Parity error in DOWN FIFO\n");
return -ENXIO;
}
ready = !(up ? sbefifo_full(sts) : sbefifo_empty(sts));
if (ready)
break;
}
if (!ready) {
sysfs_notify(&sbefifo->dev.kobj, NULL, dev_attr_timeout.attr.name);
sbefifo->timed_out = true;
dev_err(dev, "%s FIFO Timeout (%u ms)! status=%08x\n",
up ? "UP" : "DOWN", jiffies_to_msecs(timeout), sts);
return -ETIMEDOUT;
}
dev_vdbg(dev, "End of wait status: %08x\n", sts);
sbefifo->timed_out = false;
*status = sts;
return 0;
}
static int sbefifo_send_command(struct sbefifo *sbefifo,
const __be32 *command, size_t cmd_len)
{
struct device *dev = &sbefifo->fsi_dev->dev;
size_t len, chunk, vacant = 0, remaining = cmd_len;
unsigned long timeout;
u32 status;
int rc;
dev_dbg(dev, "sending command (%zd words, cmd=%04x)\n",
cmd_len, be32_to_cpu(command[1]));
/* As long as there's something to send */
timeout = msecs_to_jiffies(SBEFIFO_TIMEOUT_START_CMD);
while (remaining) {
/* Wait for room in the FIFO */
rc = sbefifo_wait(sbefifo, true, &status, timeout);
if (rc < 0)
return rc;
timeout = msecs_to_jiffies(sbefifo->timeout_in_cmd_ms);
vacant = sbefifo_vacant(status);
len = chunk = min(vacant, remaining);
dev_vdbg(dev, " status=%08x vacant=%zd chunk=%zd\n",
status, vacant, chunk);
/* Write as much as we can */
while (len--) {
rc = sbefifo_up_write(sbefifo, *(command++));
if (rc) {
dev_err(dev, "FSI error %d writing UP FIFO\n", rc);
return rc;
}
}
remaining -= chunk;
vacant -= chunk;
}
/* If there's no room left, wait for some to write EOT */
if (!vacant) {
rc = sbefifo_wait(sbefifo, true, &status, timeout);
if (rc)
return rc;
}
/* Send an EOT */
rc = sbefifo_regw(sbefifo, SBEFIFO_UP | SBEFIFO_EOT_RAISE, 0);
if (rc)
dev_err(dev, "FSI error %d writing EOT\n", rc);
return rc;
}
static int sbefifo_read_response(struct sbefifo *sbefifo, struct iov_iter *response)
{
struct device *dev = &sbefifo->fsi_dev->dev;
u32 status, eot_set;
unsigned long timeout;
bool overflow = false;
__be32 data;
size_t len;
int rc;
dev_dbg(dev, "reading response, buflen = %zd\n", iov_iter_count(response));
timeout = msecs_to_jiffies(sbefifo->timeout_start_rsp_ms);
for (;;) {
/* Grab FIFO status (this will handle parity errors) */
rc = sbefifo_wait(sbefifo, false, &status, timeout);
if (rc < 0) {
dev_dbg(dev, "timeout waiting (%u ms)\n", jiffies_to_msecs(timeout));
return rc;
}
timeout = msecs_to_jiffies(SBEFIFO_TIMEOUT_IN_RSP);
/* Decode status */
len = sbefifo_populated(status);
eot_set = sbefifo_eot_set(status);
dev_dbg(dev, " chunk size %zd eot_set=0x%x\n", len, eot_set);
/* Go through the chunk */
while(len--) {
/* Read the data */
rc = sbefifo_down_read(sbefifo, &data);
if (rc < 0)
return rc;
/* Was it an EOT ? */
if (eot_set & 0x80) {
/*
* There should be nothing else in the FIFO,
* if there is, mark broken, this will force
* a reset on next use, but don't fail the
* command.
*/
if (len) {
dev_warn(dev, "FIFO read hit"
" EOT with still %zd data\n",
len);
sbefifo->broken = true;
}
/* We are done */
rc = sbefifo_regw(sbefifo,
SBEFIFO_DOWN | SBEFIFO_EOT_ACK, 0);
/*
* If that write fails, still complete the request but mark
* the fifo as broken for subsequent reset (not much else
* we can do here).
*/
if (rc) {
dev_err(dev, "FSI error %d ack'ing EOT\n", rc);
sbefifo->broken = true;
}
/* Tell whether we overflowed */
return overflow ? -EOVERFLOW : 0;
}
/* Store it if there is room */
if (iov_iter_count(response) >= sizeof(__be32)) {
if (copy_to_iter(&data, sizeof(__be32), response) < sizeof(__be32))
return -EFAULT;
} else {
dev_vdbg(dev, "Response overflowed !\n");
overflow = true;
}
/* Next EOT bit */
eot_set <<= 1;
}
}
/* Shouldn't happen */
return -EIO;
}
static int sbefifo_do_command(struct sbefifo *sbefifo,
const __be32 *command, size_t cmd_len,
struct iov_iter *response)
{
/* Try sending the command */
int rc = sbefifo_send_command(sbefifo, command, cmd_len);
if (rc)
return rc;
/* Now, get the response */
return sbefifo_read_response(sbefifo, response);
}
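/*
 * Retrieve any asynchronous FFDC flagged by the SBE (via the GET_SBE_FFDC
 * command) and dump it to the kernel log.
 */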
static void sbefifo_collect_async_ffdc(struct sbefifo *sbefifo)
{
struct device *dev = &sbefifo->fsi_dev->dev;
struct iov_iter ffdc_iter;
struct kvec ffdc_iov;
__be32 *ffdc;
size_t ffdc_sz;
__be32 cmd[2];
int rc;
sbefifo->async_ffdc = false;
ffdc = vmalloc(SBEFIFO_MAX_FFDC_SIZE);
if (!ffdc) {
dev_err(dev, "Failed to allocate SBE FFDC buffer\n");
return;
}
ffdc_iov.iov_base = ffdc;
ffdc_iov.iov_len = SBEFIFO_MAX_FFDC_SIZE;
iov_iter_kvec(&ffdc_iter, ITER_DEST, &ffdc_iov, 1, SBEFIFO_MAX_FFDC_SIZE);
cmd[0] = cpu_to_be32(2);
cmd[1] = cpu_to_be32(SBEFIFO_CMD_GET_SBE_FFDC);
rc = sbefifo_do_command(sbefifo, cmd, 2, &ffdc_iter);
if (rc != 0) {
dev_err(dev, "Error %d retrieving SBE FFDC\n", rc);
goto bail;
}
ffdc_sz = SBEFIFO_MAX_FFDC_SIZE - iov_iter_count(&ffdc_iter);
ffdc_sz /= sizeof(__be32);
rc = sbefifo_parse_status(dev, SBEFIFO_CMD_GET_SBE_FFDC, ffdc,
ffdc_sz, &ffdc_sz);
if (rc != 0) {
dev_err(dev, "Error %d decoding SBE FFDC\n", rc);
goto bail;
}
if (ffdc_sz > 0)
sbefifo_dump_ffdc(dev, ffdc, ffdc_sz, true);
bail:
vfree(ffdc);
}
static int __sbefifo_submit(struct sbefifo *sbefifo,
const __be32 *command, size_t cmd_len,
struct iov_iter *response)
{
struct device *dev = &sbefifo->fsi_dev->dev;
int rc;
if (sbefifo->dead)
return -ENODEV;
if (cmd_len < 2 || be32_to_cpu(command[0]) != cmd_len) {
dev_vdbg(dev, "Invalid command len %zd (header: %d)\n",
cmd_len, be32_to_cpu(command[0]));
return -EINVAL;
}
/* First ensure the HW is in a clean state */
rc = sbefifo_cleanup_hw(sbefifo);
if (rc)
return rc;
/* Look for async FFDC first if any */
if (sbefifo->async_ffdc)
sbefifo_collect_async_ffdc(sbefifo);
rc = sbefifo_do_command(sbefifo, command, cmd_len, response);
if (rc != 0 && rc != -EOVERFLOW)
goto fail;
return rc;
fail:
/*
* On failure, attempt a reset. Ignore the result, it will mark
* the fifo broken if the reset fails
*/
sbefifo_request_reset(sbefifo);
/* Return original error */
return rc;
}
/**
* sbefifo_submit() - Submit an SBE FIFO command and receive the response
* @dev: The sbefifo device
* @command: The raw command data
* @cmd_len: The command size (in 32-bit words)
* @response: The output response buffer
* @resp_len: In: Response buffer size, Out: Response size
*
* This will perform the entire operation. If the response buffer
* overflows, returns -EOVERFLOW
*/
int sbefifo_submit(struct device *dev, const __be32 *command, size_t cmd_len,
__be32 *response, size_t *resp_len)
{
struct sbefifo *sbefifo;
struct iov_iter resp_iter;
struct kvec resp_iov;
size_t rbytes;
int rc;
if (!dev)
return -ENODEV;
sbefifo = dev_get_drvdata(dev);
if (!sbefifo)
return -ENODEV;
if (WARN_ON_ONCE(sbefifo->magic != SBEFIFO_MAGIC))
return -ENODEV;
if (!resp_len || !command || !response)
return -EINVAL;
/* Prepare iov iterator */
rbytes = (*resp_len) * sizeof(__be32);
resp_iov.iov_base = response;
resp_iov.iov_len = rbytes;
iov_iter_kvec(&resp_iter, ITER_DEST, &resp_iov, 1, rbytes);
/* Perform the command */
rc = mutex_lock_interruptible(&sbefifo->lock);
if (rc)
return rc;
rc = __sbefifo_submit(sbefifo, command, cmd_len, &resp_iter);
mutex_unlock(&sbefifo->lock);
/* Extract the response length */
rbytes -= iov_iter_count(&resp_iter);
*resp_len = rbytes / sizeof(__be32);
return rc;
}
EXPORT_SYMBOL_GPL(sbefifo_submit);
/*
* Char device interface
*/
static void sbefifo_release_command(struct sbefifo_user *user)
{
if (is_vmalloc_addr(user->pending_cmd))
vfree(user->pending_cmd);
user->pending_cmd = NULL;
user->pending_len = 0;
}
static int sbefifo_user_open(struct inode *inode, struct file *file)
{
struct sbefifo *sbefifo = container_of(inode->i_cdev, struct sbefifo, cdev);
struct sbefifo_user *user;
user = kzalloc(sizeof(struct sbefifo_user), GFP_KERNEL);
if (!user)
return -ENOMEM;
file->private_data = user;
user->sbefifo = sbefifo;
user->cmd_page = (void *)__get_free_page(GFP_KERNEL);
if (!user->cmd_page) {
kfree(user);
return -ENOMEM;
}
mutex_init(&user->file_lock);
user->cmd_timeout_ms = SBEFIFO_TIMEOUT_IN_CMD;
user->read_timeout_ms = SBEFIFO_TIMEOUT_START_RSP;
return 0;
}
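/*
 * A read submits the command staged by the preceding write and copies the
 * SBE response directly into the user buffer.
 */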
static ssize_t sbefifo_user_read(struct file *file, char __user *buf,
size_t len, loff_t *offset)
{
struct sbefifo_user *user = file->private_data;
struct sbefifo *sbefifo;
struct iov_iter resp_iter;
struct iovec resp_iov;
size_t cmd_len;
int rc;
if (!user)
return -EINVAL;
sbefifo = user->sbefifo;
if (len & 3)
return -EINVAL;
mutex_lock(&user->file_lock);
/* Cronus relies on -EAGAIN after a short read */
if (user->pending_len == 0) {
rc = -EAGAIN;
goto bail;
}
if (user->pending_len < 8) {
rc = -EINVAL;
goto bail;
}
cmd_len = user->pending_len >> 2;
/* Prepare iov iterator */
resp_iov.iov_base = buf;
resp_iov.iov_len = len;
iov_iter_init(&resp_iter, ITER_DEST, &resp_iov, 1, len);
/* Perform the command */
rc = mutex_lock_interruptible(&sbefifo->lock);
if (rc)
goto bail;
sbefifo->timeout_in_cmd_ms = user->cmd_timeout_ms;
sbefifo->timeout_start_rsp_ms = user->read_timeout_ms;
rc = __sbefifo_submit(sbefifo, user->pending_cmd, cmd_len, &resp_iter);
sbefifo->timeout_start_rsp_ms = SBEFIFO_TIMEOUT_START_RSP;
sbefifo->timeout_in_cmd_ms = SBEFIFO_TIMEOUT_IN_CMD;
mutex_unlock(&sbefifo->lock);
if (rc < 0)
goto bail;
/* Extract the response length */
rc = len - iov_iter_count(&resp_iter);
bail:
sbefifo_release_command(user);
mutex_unlock(&user->file_lock);
return rc;
}
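/*
 * A write only stages the command (or performs a FIFO reset when given the
 * magic word); the staged command is submitted by the next read.
 */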
static ssize_t sbefifo_user_write(struct file *file, const char __user *buf,
size_t len, loff_t *offset)
{
struct sbefifo_user *user = file->private_data;
struct sbefifo *sbefifo;
int rc = len;
if (!user)
return -EINVAL;
sbefifo = user->sbefifo;
if (len > SBEFIFO_MAX_USER_CMD_LEN)
return -EINVAL;
if (len & 3)
return -EINVAL;
mutex_lock(&user->file_lock);
/* Can we use the pre-allocated buffer? If not, allocate */
if (len <= PAGE_SIZE)
user->pending_cmd = user->cmd_page;
else
user->pending_cmd = vmalloc(len);
if (!user->pending_cmd) {
rc = -ENOMEM;
goto bail;
}
/* Copy the command into the staging buffer */
if (copy_from_user(user->pending_cmd, buf, len)) {
rc = -EFAULT;
goto bail;
}
/* Check for the magic reset command */
if (len == 4 && be32_to_cpu(*(__be32 *)user->pending_cmd) ==
SBEFIFO_RESET_MAGIC) {
/* Clear out any pending command */
user->pending_len = 0;
/* Trigger reset request */
rc = mutex_lock_interruptible(&sbefifo->lock);
if (rc)
goto bail;
rc = sbefifo_request_reset(user->sbefifo);
mutex_unlock(&sbefifo->lock);
if (rc == 0)
rc = 4;
goto bail;
}
/* Update the staging buffer size */
user->pending_len = len;
bail:
if (!user->pending_len)
sbefifo_release_command(user);
mutex_unlock(&user->file_lock);
/* And that's it, we'll issue the command on a read */
return rc;
}
static int sbefifo_user_release(struct inode *inode, struct file *file)
{
struct sbefifo_user *user = file->private_data;
if (!user)
return -EINVAL;
sbefifo_release_command(user);
free_page((unsigned long)user->cmd_page);
kfree(user);
return 0;
}
static int sbefifo_cmd_timeout(struct sbefifo_user *user, void __user *argp)
{
struct device *dev = &user->sbefifo->dev;
u32 timeout;
if (get_user(timeout, (__u32 __user *)argp))
return -EFAULT;
if (timeout == 0) {
user->cmd_timeout_ms = SBEFIFO_TIMEOUT_IN_CMD;
dev_dbg(dev, "Command timeout reset to %us\n", user->cmd_timeout_ms / 1000);
return 0;
}
user->cmd_timeout_ms = timeout * 1000; /* user timeout is in sec */
dev_dbg(dev, "Command timeout set to %us\n", timeout);
return 0;
}
static int sbefifo_read_timeout(struct sbefifo_user *user, void __user *argp)
{
struct device *dev = &user->sbefifo->dev;
u32 timeout;
if (get_user(timeout, (__u32 __user *)argp))
return -EFAULT;
if (timeout == 0) {
user->read_timeout_ms = SBEFIFO_TIMEOUT_START_RSP;
dev_dbg(dev, "Timeout reset to %us\n", user->read_timeout_ms / 1000);
return 0;
}
user->read_timeout_ms = timeout * 1000; /* user timeout is in sec */
dev_dbg(dev, "Timeout set to %us\n", timeout);
return 0;
}
static long sbefifo_user_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
struct sbefifo_user *user = file->private_data;
int rc = -ENOTTY;
if (!user)
return -EINVAL;
mutex_lock(&user->file_lock);
switch (cmd) {
case FSI_SBEFIFO_CMD_TIMEOUT_SECONDS:
rc = sbefifo_cmd_timeout(user, (void __user *)arg);
break;
case FSI_SBEFIFO_READ_TIMEOUT_SECONDS:
rc = sbefifo_read_timeout(user, (void __user *)arg);
break;
}
mutex_unlock(&user->file_lock);
return rc;
}
static const struct file_operations sbefifo_fops = {
.owner = THIS_MODULE,
.open = sbefifo_user_open,
.read = sbefifo_user_read,
.write = sbefifo_user_write,
.release = sbefifo_user_release,
.unlocked_ioctl = sbefifo_user_ioctl,
};
static void sbefifo_free(struct device *dev)
{
struct sbefifo *sbefifo = container_of(dev, struct sbefifo, dev);
put_device(&sbefifo->fsi_dev->dev);
kfree(sbefifo);
}
/*
* Probe/remove
*/
static int sbefifo_probe(struct device *dev)
{
struct fsi_device *fsi_dev = to_fsi_dev(dev);
struct sbefifo *sbefifo;
struct device_node *np;
struct platform_device *child;
char child_name[32];
int rc, didx, child_idx = 0;
dev_dbg(dev, "Found sbefifo device\n");
sbefifo = kzalloc(sizeof(*sbefifo), GFP_KERNEL);
if (!sbefifo)
return -ENOMEM;
/* Grab a reference to the device (parent of our cdev); we'll drop it later */
if (!get_device(dev)) {
kfree(sbefifo);
return -ENODEV;
}
sbefifo->magic = SBEFIFO_MAGIC;
sbefifo->fsi_dev = fsi_dev;
dev_set_drvdata(dev, sbefifo);
mutex_init(&sbefifo->lock);
sbefifo->timeout_in_cmd_ms = SBEFIFO_TIMEOUT_IN_CMD;
sbefifo->timeout_start_rsp_ms = SBEFIFO_TIMEOUT_START_RSP;
/* Create chardev for userspace access */
sbefifo->dev.type = &fsi_cdev_type;
sbefifo->dev.parent = dev;
sbefifo->dev.release = sbefifo_free;
device_initialize(&sbefifo->dev);
/* Allocate a minor in the FSI space */
rc = fsi_get_new_minor(fsi_dev, fsi_dev_sbefifo, &sbefifo->dev.devt, &didx);
if (rc)
goto err;
dev_set_name(&sbefifo->dev, "sbefifo%d", didx);
cdev_init(&sbefifo->cdev, &sbefifo_fops);
rc = cdev_device_add(&sbefifo->cdev, &sbefifo->dev);
if (rc) {
dev_err(dev, "Error %d creating char device %s\n",
rc, dev_name(&sbefifo->dev));
goto err_free_minor;
}
/* Create platform devs for dts child nodes (occ, etc) */
for_each_available_child_of_node(dev->of_node, np) {
snprintf(child_name, sizeof(child_name), "%s-dev%d",
dev_name(&sbefifo->dev), child_idx++);
child = of_platform_device_create(np, child_name, dev);
if (!child)
dev_warn(dev, "failed to create child %s dev\n",
child_name);
}
device_create_file(&sbefifo->dev, &dev_attr_timeout);
return 0;
err_free_minor:
fsi_free_minor(sbefifo->dev.devt);
err:
put_device(&sbefifo->dev);
return rc;
}
static int sbefifo_unregister_child(struct device *dev, void *data)
{
struct platform_device *child = to_platform_device(dev);
of_device_unregister(child);
if (dev->of_node)
of_node_clear_flag(dev->of_node, OF_POPULATED);
return 0;
}
static int sbefifo_remove(struct device *dev)
{
struct sbefifo *sbefifo = dev_get_drvdata(dev);
dev_dbg(dev, "Removing sbefifo device...\n");
device_remove_file(&sbefifo->dev, &dev_attr_timeout);
mutex_lock(&sbefifo->lock);
sbefifo->dead = true;
mutex_unlock(&sbefifo->lock);
cdev_device_del(&sbefifo->cdev, &sbefifo->dev);
fsi_free_minor(sbefifo->dev.devt);
device_for_each_child(dev, NULL, sbefifo_unregister_child);
put_device(&sbefifo->dev);
return 0;
}
static const struct fsi_device_id sbefifo_ids[] = {
{
.engine_type = FSI_ENGID_SBE,
.version = FSI_VERSION_ANY,
},
{ 0 }
};
static struct fsi_driver sbefifo_drv = {
.id_table = sbefifo_ids,
.drv = {
.name = DEVICE_NAME,
.bus = &fsi_bus_type,
.probe = sbefifo_probe,
.remove = sbefifo_remove,
}
};
static int sbefifo_init(void)
{
return fsi_driver_register(&sbefifo_drv);
}
static void sbefifo_exit(void)
{
fsi_driver_unregister(&sbefifo_drv);
}
module_init(sbefifo_init);
module_exit(sbefifo_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Brad Bishop <[email protected]>");
MODULE_AUTHOR("Eddie James <[email protected]>");
MODULE_AUTHOR("Andrew Jeffery <[email protected]>");
MODULE_AUTHOR("Benjamin Herrenschmidt <[email protected]>");
MODULE_DESCRIPTION("Linux device interface to the POWER Self Boot Engine");
| linux-master | drivers/fsi/fsi-sbefifo.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* FSI core driver
*
* Copyright (C) IBM Corporation 2016
*
* TODO:
* - Rework topology
* - s/chip_id/chip_loc
* - s/cfam/chip (cfam_id -> chip_id etc...)
*/
#include <linux/crc4.h>
#include <linux/device.h>
#include <linux/fsi.h>
#include <linux/idr.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/slab.h>
#include <linux/bitops.h>
#include <linux/cdev.h>
#include <linux/fs.h>
#include <linux/uaccess.h>
#include "fsi-master.h"
#include "fsi-slave.h"
#define CREATE_TRACE_POINTS
#include <trace/events/fsi.h>
#define FSI_SLAVE_CONF_NEXT_MASK GENMASK(31, 31)
#define FSI_SLAVE_CONF_SLOTS_MASK GENMASK(23, 16)
#define FSI_SLAVE_CONF_SLOTS_SHIFT 16
#define FSI_SLAVE_CONF_VERSION_MASK GENMASK(15, 12)
#define FSI_SLAVE_CONF_VERSION_SHIFT 12
#define FSI_SLAVE_CONF_TYPE_MASK GENMASK(11, 4)
#define FSI_SLAVE_CONF_TYPE_SHIFT 4
#define FSI_SLAVE_CONF_CRC_SHIFT 4
#define FSI_SLAVE_CONF_CRC_MASK GENMASK(3, 0)
#define FSI_SLAVE_CONF_DATA_BITS 28
#define FSI_PEEK_BASE 0x410
static const int engine_page_size = 0x400;
#define FSI_SLAVE_BASE 0x800
/*
* FSI slave engine control register offsets
*/
#define FSI_SMODE 0x0 /* R/W: Mode register */
#define FSI_SISC 0x8 /* R/W: Interrupt condition */
#define FSI_SSTAT 0x14 /* R : Slave status */
#define FSI_SLBUS 0x30 /* W : LBUS Ownership */
#define FSI_LLMODE 0x100 /* R/W: Link layer mode register */
/*
* SMODE fields
*/
#define FSI_SMODE_WSC 0x80000000 /* Warm start done */
#define FSI_SMODE_ECRC 0x20000000 /* Hw CRC check */
#define FSI_SMODE_SID_SHIFT 24 /* ID shift */
#define FSI_SMODE_SID_MASK 3 /* ID Mask */
#define FSI_SMODE_ED_SHIFT 20 /* Echo delay shift */
#define FSI_SMODE_ED_MASK 0xf /* Echo delay mask */
#define FSI_SMODE_SD_SHIFT 16 /* Send delay shift */
#define FSI_SMODE_SD_MASK 0xf /* Send delay mask */
#define FSI_SMODE_LBCRR_SHIFT 8 /* Clk ratio shift */
#define FSI_SMODE_LBCRR_MASK 0xf /* Clk ratio mask */
/*
* SLBUS fields
*/
#define FSI_SLBUS_FORCE 0x80000000 /* Force LBUS ownership */
/*
* LLMODE fields
*/
#define FSI_LLMODE_ASYNC 0x1
#define FSI_SLAVE_SIZE_23b 0x800000
static DEFINE_IDA(master_ida);
static const int slave_retries = 2;
static int discard_errors;
static dev_t fsi_base_dev;
static DEFINE_IDA(fsi_minor_ida);
#define FSI_CHAR_MAX_DEVICES 0x1000
/* Legacy /dev numbering: 4 devices per chip, 16 chips */
#define FSI_CHAR_LEGACY_TOP 64
static int fsi_master_read(struct fsi_master *master, int link,
uint8_t slave_id, uint32_t addr, void *val, size_t size);
static int fsi_master_write(struct fsi_master *master, int link,
uint8_t slave_id, uint32_t addr, const void *val, size_t size);
static int fsi_master_break(struct fsi_master *master, int link);
/*
* fsi_device_read() / fsi_device_write() / fsi_device_peek()
*
* FSI endpoint-device support
*
* Read / write / peek accessors for a client
*
* Parameters:
* dev: Structure passed to FSI client device drivers on probe().
* addr: FSI address of given device. Client should pass in its base address
* plus desired offset to access its register space.
* val: For read/peek this is the value read at the specified address. For
* write this is value to write to the specified address.
* The data in val must be FSI bus endian (big endian).
* size: Size in bytes of the operation. Sizes supported are 1, 2 and 4 bytes.
* Addresses must be aligned on size boundaries or an error will result.
*/
int fsi_device_read(struct fsi_device *dev, uint32_t addr, void *val,
size_t size)
{
if (addr > dev->size || size > dev->size || addr > dev->size - size)
return -EINVAL;
return fsi_slave_read(dev->slave, dev->addr + addr, val, size);
}
EXPORT_SYMBOL_GPL(fsi_device_read);
int fsi_device_write(struct fsi_device *dev, uint32_t addr, const void *val,
size_t size)
{
if (addr > dev->size || size > dev->size || addr > dev->size - size)
return -EINVAL;
return fsi_slave_write(dev->slave, dev->addr + addr, val, size);
}
EXPORT_SYMBOL_GPL(fsi_device_write);
int fsi_device_peek(struct fsi_device *dev, void *val)
{
uint32_t addr = FSI_PEEK_BASE + ((dev->unit - 2) * sizeof(uint32_t));
return fsi_slave_read(dev->slave, addr, val, sizeof(uint32_t));
}
static void fsi_device_release(struct device *_device)
{
struct fsi_device *device = to_fsi_dev(_device);
of_node_put(device->dev.of_node);
kfree(device);
}
static struct fsi_device *fsi_create_device(struct fsi_slave *slave)
{
struct fsi_device *dev;
dev = kzalloc(sizeof(*dev), GFP_KERNEL);
if (!dev)
return NULL;
dev->dev.parent = &slave->dev;
dev->dev.bus = &fsi_bus_type;
dev->dev.release = fsi_device_release;
return dev;
}
/* FSI slave support */
static int fsi_slave_calc_addr(struct fsi_slave *slave, uint32_t *addrp,
uint8_t *idp)
{
uint32_t addr = *addrp;
uint8_t id = *idp;
if (addr > slave->size)
return -EINVAL;
/* For 23 bit addressing, we encode the extra two bits in the slave
* id (and the slave's actual ID needs to be 0).
*/
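/* e.g. a 23-bit address of 0x3fffff is sent as id 1, address 0x1fffff */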
if (addr > 0x1fffff) {
if (slave->id != 0)
return -EINVAL;
id = (addr >> 21) & 0x3;
addr &= 0x1fffff;
}
*addrp = addr;
*idp = id;
return 0;
}
static int fsi_slave_report_and_clear_errors(struct fsi_slave *slave)
{
struct fsi_master *master = slave->master;
__be32 irq, stat;
int rc, link;
uint8_t id;
link = slave->link;
id = slave->id;
rc = fsi_master_read(master, link, id, FSI_SLAVE_BASE + FSI_SISC,
&irq, sizeof(irq));
if (rc)
return rc;
rc = fsi_master_read(master, link, id, FSI_SLAVE_BASE + FSI_SSTAT,
&stat, sizeof(stat));
if (rc)
return rc;
dev_dbg(&slave->dev, "status: 0x%08x, sisc: 0x%08x\n",
be32_to_cpu(stat), be32_to_cpu(irq));
/* clear interrupts */
return fsi_master_write(master, link, id, FSI_SLAVE_BASE + FSI_SISC,
&irq, sizeof(irq));
}
/* Encode slave local bus echo delay */
static inline uint32_t fsi_smode_echodly(int x)
{
return (x & FSI_SMODE_ED_MASK) << FSI_SMODE_ED_SHIFT;
}
/* Encode slave local bus send delay */
static inline uint32_t fsi_smode_senddly(int x)
{
return (x & FSI_SMODE_SD_MASK) << FSI_SMODE_SD_SHIFT;
}
/* Encode slave local bus clock rate ratio */
static inline uint32_t fsi_smode_lbcrr(int x)
{
return (x & FSI_SMODE_LBCRR_MASK) << FSI_SMODE_LBCRR_SHIFT;
}
/* Encode slave ID */
static inline uint32_t fsi_smode_sid(int x)
{
return (x & FSI_SMODE_SID_MASK) << FSI_SMODE_SID_SHIFT;
}
static uint32_t fsi_slave_smode(int id, u8 t_senddly, u8 t_echodly)
{
return FSI_SMODE_WSC | FSI_SMODE_ECRC
| fsi_smode_sid(id)
| fsi_smode_echodly(t_echodly - 1) | fsi_smode_senddly(t_senddly - 1)
| fsi_smode_lbcrr(0x8);
}
static int fsi_slave_set_smode(struct fsi_slave *slave)
{
uint32_t smode;
__be32 data;
/* set our smode register with the slave ID field to 0; this enables
* extended slave addressing
*/
smode = fsi_slave_smode(slave->id, slave->t_send_delay, slave->t_echo_delay);
data = cpu_to_be32(smode);
return fsi_master_write(slave->master, slave->link, slave->id,
FSI_SLAVE_BASE + FSI_SMODE,
&data, sizeof(data));
}
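/*
 * Error recovery escalates in three steps: clear the slave error status,
 * then send a TERM, and finally reset the slave with a BREAK before
 * restoring its smode and link configuration.
 */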
static int fsi_slave_handle_error(struct fsi_slave *slave, bool write,
uint32_t addr, size_t size)
{
struct fsi_master *master = slave->master;
int rc, link;
uint32_t reg;
uint8_t id, send_delay, echo_delay;
if (discard_errors)
return -1;
link = slave->link;
id = slave->id;
dev_dbg(&slave->dev, "handling error on %s to 0x%08x[%zd]",
write ? "write" : "read", addr, size);
/* try a simple clear of error conditions, which may fail if we've lost
* communication with the slave
*/
rc = fsi_slave_report_and_clear_errors(slave);
if (!rc)
return 0;
/* send a TERM and retry */
if (master->term) {
rc = master->term(master, link, id);
if (!rc) {
rc = fsi_master_read(master, link, id, 0,
®, sizeof(reg));
if (!rc)
rc = fsi_slave_report_and_clear_errors(slave);
if (!rc)
return 0;
}
}
send_delay = slave->t_send_delay;
echo_delay = slave->t_echo_delay;
/* getting serious, reset the slave via BREAK */
rc = fsi_master_break(master, link);
if (rc)
return rc;
slave->t_send_delay = send_delay;
slave->t_echo_delay = echo_delay;
rc = fsi_slave_set_smode(slave);
if (rc)
return rc;
if (master->link_config)
master->link_config(master, link,
slave->t_send_delay,
slave->t_echo_delay);
return fsi_slave_report_and_clear_errors(slave);
}
int fsi_slave_read(struct fsi_slave *slave, uint32_t addr,
void *val, size_t size)
{
uint8_t id = slave->id;
int rc, err_rc, i;
rc = fsi_slave_calc_addr(slave, &addr, &id);
if (rc)
return rc;
for (i = 0; i < slave_retries; i++) {
rc = fsi_master_read(slave->master, slave->link,
id, addr, val, size);
if (!rc)
break;
err_rc = fsi_slave_handle_error(slave, false, addr, size);
if (err_rc)
break;
}
return rc;
}
EXPORT_SYMBOL_GPL(fsi_slave_read);
int fsi_slave_write(struct fsi_slave *slave, uint32_t addr,
const void *val, size_t size)
{
uint8_t id = slave->id;
int rc, err_rc, i;
rc = fsi_slave_calc_addr(slave, &addr, &id);
if (rc)
return rc;
for (i = 0; i < slave_retries; i++) {
rc = fsi_master_write(slave->master, slave->link,
id, addr, val, size);
if (!rc)
break;
err_rc = fsi_slave_handle_error(slave, true, addr, size);
if (err_rc)
break;
}
return rc;
}
EXPORT_SYMBOL_GPL(fsi_slave_write);
int fsi_slave_claim_range(struct fsi_slave *slave,
uint32_t addr, uint32_t size)
{
if (addr + size < addr)
return -EINVAL;
if (addr + size > slave->size)
return -EINVAL;
/* todo: check for overlapping claims */
return 0;
}
EXPORT_SYMBOL_GPL(fsi_slave_claim_range);
void fsi_slave_release_range(struct fsi_slave *slave,
uint32_t addr, uint32_t size)
{
}
EXPORT_SYMBOL_GPL(fsi_slave_release_range);
static bool fsi_device_node_matches(struct device *dev, struct device_node *np,
uint32_t addr, uint32_t size)
{
u64 paddr, psize;
if (of_property_read_reg(np, 0, &paddr, &psize))
return false;
if (paddr != addr)
return false;
if (psize != size) {
dev_warn(dev,
"node %pOF matches probed address, but not size (got 0x%llx, expected 0x%x)",
np, psize, size);
}
return true;
}
/* Find a matching node for the slave engine at @address, using @size bytes
* of space. Returns NULL if not found, or a matching node with refcount
* already incremented.
*/
static struct device_node *fsi_device_find_of_node(struct fsi_device *dev)
{
struct device_node *parent, *np;
parent = dev_of_node(&dev->slave->dev);
if (!parent)
return NULL;
for_each_child_of_node(parent, np) {
if (fsi_device_node_matches(&dev->dev, np,
dev->addr, dev->size))
return np;
}
return NULL;
}
static int fsi_slave_scan(struct fsi_slave *slave)
{
uint32_t engine_addr;
int rc, i;
/*
* scan engines
*
* We keep the peek mode and slave engines for the core; so start
* at the third slot in the configuration table. We also need to
* skip the chip ID entry at the start of the address space.
*/
engine_addr = engine_page_size * 3;
for (i = 2; i < engine_page_size / sizeof(uint32_t); i++) {
uint8_t slots, version, type, crc;
struct fsi_device *dev;
uint32_t conf;
__be32 data;
rc = fsi_slave_read(slave, (i + 1) * sizeof(data),
&data, sizeof(data));
if (rc) {
dev_warn(&slave->dev,
"error reading slave registers\n");
return -1;
}
conf = be32_to_cpu(data);
crc = crc4(0, conf, 32);
if (crc) {
dev_warn(&slave->dev,
"crc error in slave register at 0x%04x\n",
i);
return -1;
}
slots = (conf & FSI_SLAVE_CONF_SLOTS_MASK)
>> FSI_SLAVE_CONF_SLOTS_SHIFT;
version = (conf & FSI_SLAVE_CONF_VERSION_MASK)
>> FSI_SLAVE_CONF_VERSION_SHIFT;
type = (conf & FSI_SLAVE_CONF_TYPE_MASK)
>> FSI_SLAVE_CONF_TYPE_SHIFT;
/*
* Unused address areas are marked by a zero type value; only defined
* address areas (non-zero type and slot count) get a device created
*/
if (type != 0 && slots != 0) {
/* create device */
dev = fsi_create_device(slave);
if (!dev)
return -ENOMEM;
dev->slave = slave;
dev->engine_type = type;
dev->version = version;
dev->unit = i;
dev->addr = engine_addr;
dev->size = slots * engine_page_size;
trace_fsi_dev_init(dev);
dev_dbg(&slave->dev,
"engine[%i]: type %x, version %x, addr %x size %x\n",
dev->unit, dev->engine_type, version,
dev->addr, dev->size);
dev_set_name(&dev->dev, "%02x:%02x:%02x:%02x",
slave->master->idx, slave->link,
slave->id, i - 2);
dev->dev.of_node = fsi_device_find_of_node(dev);
rc = device_register(&dev->dev);
if (rc) {
dev_warn(&slave->dev, "add failed: %d\n", rc);
put_device(&dev->dev);
}
}
engine_addr += slots * engine_page_size;
if (!(conf & FSI_SLAVE_CONF_NEXT_MASK))
break;
}
return 0;
}
static unsigned long aligned_access_size(size_t offset, size_t count)
{
unsigned long offset_unit, count_unit;
/* Criteria:
*
* 1. Access size must be less than or equal to the maximum access
* width or the highest power-of-two factor of offset
* 2. Access size must be less than or equal to the amount specified by
* count
*
* The access width is optimal if we can calculate 1 to be strictly
* equal while still satisfying 2.
*/
/* Find 1 by the bottom bit of offset (with a 4 byte access cap) */
offset_unit = BIT(__builtin_ctzl(offset | 4));
/* Find 2 by the top bit of count */
count_unit = BIT(8 * sizeof(unsigned long) - 1 - __builtin_clzl(count));
/* Constrain the maximum access width to the minimum of both criteria */
return BIT(__builtin_ctzl(offset_unit | count_unit));
}
static ssize_t fsi_slave_sysfs_raw_read(struct file *file,
struct kobject *kobj, struct bin_attribute *attr, char *buf,
loff_t off, size_t count)
{
struct fsi_slave *slave = to_fsi_slave(kobj_to_dev(kobj));
size_t total_len, read_len;
int rc;
if (off < 0)
return -EINVAL;
if (off > 0xffffffff || count > 0xffffffff || off + count > 0xffffffff)
return -EINVAL;
for (total_len = 0; total_len < count; total_len += read_len) {
read_len = aligned_access_size(off, count - total_len);
rc = fsi_slave_read(slave, off, buf + total_len, read_len);
if (rc)
return rc;
off += read_len;
}
return count;
}
static ssize_t fsi_slave_sysfs_raw_write(struct file *file,
struct kobject *kobj, struct bin_attribute *attr,
char *buf, loff_t off, size_t count)
{
struct fsi_slave *slave = to_fsi_slave(kobj_to_dev(kobj));
size_t total_len, write_len;
int rc;
if (off < 0)
return -EINVAL;
if (off > 0xffffffff || count > 0xffffffff || off + count > 0xffffffff)
return -EINVAL;
for (total_len = 0; total_len < count; total_len += write_len) {
write_len = aligned_access_size(off, count - total_len);
rc = fsi_slave_write(slave, off, buf + total_len, write_len);
if (rc)
return rc;
off += write_len;
}
return count;
}
static const struct bin_attribute fsi_slave_raw_attr = {
.attr = {
.name = "raw",
.mode = 0600,
},
.size = 0,
.read = fsi_slave_sysfs_raw_read,
.write = fsi_slave_sysfs_raw_write,
};
static void fsi_slave_release(struct device *dev)
{
struct fsi_slave *slave = to_fsi_slave(dev);
fsi_free_minor(slave->dev.devt);
of_node_put(dev->of_node);
kfree(slave);
}
static bool fsi_slave_node_matches(struct device_node *np,
int link, uint8_t id)
{
u64 addr;
if (of_property_read_reg(np, 0, &addr, NULL))
return false;
return addr == (((u64)link << 32) | id);
}
/* Find a matching node for the slave at (link, id). Returns NULL if none
* found, or a matching node with refcount already incremented.
*/
static struct device_node *fsi_slave_find_of_node(struct fsi_master *master,
int link, uint8_t id)
{
struct device_node *parent, *np;
parent = dev_of_node(&master->dev);
if (!parent)
return NULL;
for_each_child_of_node(parent, np) {
if (fsi_slave_node_matches(np, link, id))
return np;
}
return NULL;
}
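/*
 * Raw CFAM read via the character device; transfers are split into chunks
 * of at most four bytes, trimmed to keep each access naturally aligned.
 */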
static ssize_t cfam_read(struct file *filep, char __user *buf, size_t count,
loff_t *offset)
{
struct fsi_slave *slave = filep->private_data;
size_t total_len, read_len;
loff_t off = *offset;
ssize_t rc;
if (off < 0)
return -EINVAL;
if (off > 0xffffffff || count > 0xffffffff || off + count > 0xffffffff)
return -EINVAL;
for (total_len = 0; total_len < count; total_len += read_len) {
__be32 data;
read_len = min_t(size_t, count, 4);
read_len -= off & 0x3;
rc = fsi_slave_read(slave, off, &data, read_len);
if (rc)
goto fail;
rc = copy_to_user(buf + total_len, &data, read_len);
if (rc) {
rc = -EFAULT;
goto fail;
}
off += read_len;
}
rc = count;
fail:
*offset = off;
return rc;
}
static ssize_t cfam_write(struct file *filep, const char __user *buf,
size_t count, loff_t *offset)
{
struct fsi_slave *slave = filep->private_data;
size_t total_len, write_len;
loff_t off = *offset;
ssize_t rc;
if (off < 0)
return -EINVAL;
if (off > 0xffffffff || count > 0xffffffff || off + count > 0xffffffff)
return -EINVAL;
for (total_len = 0; total_len < count; total_len += write_len) {
__be32 data;
write_len = min_t(size_t, count, 4);
write_len -= off & 0x3;
rc = copy_from_user(&data, buf + total_len, write_len);
if (rc) {
rc = -EFAULT;
goto fail;
}
rc = fsi_slave_write(slave, off, &data, write_len);
if (rc)
goto fail;
off += write_len;
}
rc = count;
fail:
*offset = off;
return rc;
}
static loff_t cfam_llseek(struct file *file, loff_t offset, int whence)
{
switch (whence) {
case SEEK_CUR:
break;
case SEEK_SET:
file->f_pos = offset;
break;
default:
return -EINVAL;
}
return offset;
}
static int cfam_open(struct inode *inode, struct file *file)
{
struct fsi_slave *slave = container_of(inode->i_cdev, struct fsi_slave, cdev);
file->private_data = slave;
return 0;
}
static const struct file_operations cfam_fops = {
.owner = THIS_MODULE,
.open = cfam_open,
.llseek = cfam_llseek,
.read = cfam_read,
.write = cfam_write,
};
static ssize_t send_term_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
struct fsi_slave *slave = to_fsi_slave(dev);
struct fsi_master *master = slave->master;
if (!master->term)
return -ENODEV;
master->term(master, slave->link, slave->id);
return count;
}
static DEVICE_ATTR_WO(send_term);
static ssize_t slave_send_echo_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct fsi_slave *slave = to_fsi_slave(dev);
return sprintf(buf, "%u\n", slave->t_send_delay);
}
static ssize_t slave_send_echo_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
struct fsi_slave *slave = to_fsi_slave(dev);
struct fsi_master *master = slave->master;
unsigned long val;
int rc;
if (kstrtoul(buf, 0, &val) < 0)
return -EINVAL;
if (val < 1 || val > 16)
return -EINVAL;
if (!master->link_config)
return -ENXIO;
/* Current HW mandates that send and echo delay are identical */
slave->t_send_delay = val;
slave->t_echo_delay = val;
rc = fsi_slave_set_smode(slave);
if (rc < 0)
return rc;
if (master->link_config)
master->link_config(master, slave->link,
slave->t_send_delay,
slave->t_echo_delay);
return count;
}
static DEVICE_ATTR(send_echo_delays, 0600,
slave_send_echo_show, slave_send_echo_store);
static ssize_t chip_id_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct fsi_slave *slave = to_fsi_slave(dev);
return sprintf(buf, "%d\n", slave->chip_id);
}
static DEVICE_ATTR_RO(chip_id);
static ssize_t cfam_id_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct fsi_slave *slave = to_fsi_slave(dev);
return sprintf(buf, "0x%x\n", slave->cfam_id);
}
static DEVICE_ATTR_RO(cfam_id);
static struct attribute *cfam_attr[] = {
&dev_attr_send_echo_delays.attr,
&dev_attr_chip_id.attr,
&dev_attr_cfam_id.attr,
&dev_attr_send_term.attr,
NULL,
};
static const struct attribute_group cfam_attr_group = {
.attrs = cfam_attr,
};
static const struct attribute_group *cfam_attr_groups[] = {
&cfam_attr_group,
NULL,
};
static char *cfam_devnode(const struct device *dev, umode_t *mode,
kuid_t *uid, kgid_t *gid)
{
const struct fsi_slave *slave = to_fsi_slave(dev);
#ifdef CONFIG_FSI_NEW_DEV_NODE
return kasprintf(GFP_KERNEL, "fsi/cfam%d", slave->cdev_idx);
#else
return kasprintf(GFP_KERNEL, "cfam%d", slave->cdev_idx);
#endif
}
static const struct device_type cfam_type = {
.name = "cfam",
.devnode = cfam_devnode,
.groups = cfam_attr_groups
};
static char *fsi_cdev_devnode(const struct device *dev, umode_t *mode,
kuid_t *uid, kgid_t *gid)
{
#ifdef CONFIG_FSI_NEW_DEV_NODE
return kasprintf(GFP_KERNEL, "fsi/%s", dev_name(dev));
#else
return kasprintf(GFP_KERNEL, "%s", dev_name(dev));
#endif
}
const struct device_type fsi_cdev_type = {
.name = "fsi-cdev",
.devnode = fsi_cdev_devnode,
};
EXPORT_SYMBOL_GPL(fsi_cdev_type);
/* Backward compatible /dev/ numbering in "old style" mode */
static int fsi_adjust_index(int index)
{
#ifdef CONFIG_FSI_NEW_DEV_NODE
return index;
#else
return index + 1;
#endif
}
static int __fsi_get_new_minor(struct fsi_slave *slave, enum fsi_dev_type type,
dev_t *out_dev, int *out_index)
{
int cid = slave->chip_id;
int id;
/* Check if we qualify for legacy numbering */
if (cid >= 0 && cid < 16 && type < 4) {
/*
* Try reserving the legacy number, which has 0 - 0x3f reserved
* in the ida range. cid goes up to 0xf and type contains two
* bits, so construct the id with the below two bit shift.
*/
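/* e.g. chip_id 2 with an fsi_dev_scom chardev (type 2) reserves minor (2 << 2) | 2 = 0xa */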
id = (cid << 2) | type;
id = ida_alloc_range(&fsi_minor_ida, id, id, GFP_KERNEL);
if (id >= 0) {
*out_index = fsi_adjust_index(cid);
*out_dev = fsi_base_dev + id;
return 0;
}
/* Other failure */
if (id != -ENOSPC)
return id;
/* Fallback to non-legacy allocation */
}
id = ida_alloc_range(&fsi_minor_ida, FSI_CHAR_LEGACY_TOP,
FSI_CHAR_MAX_DEVICES - 1, GFP_KERNEL);
if (id < 0)
return id;
*out_index = fsi_adjust_index(id);
*out_dev = fsi_base_dev + id;
return 0;
}
static const char *const fsi_dev_type_names[] = {
"cfam",
"sbefifo",
"scom",
"occ",
};
int fsi_get_new_minor(struct fsi_device *fdev, enum fsi_dev_type type,
dev_t *out_dev, int *out_index)
{
if (fdev->dev.of_node) {
int aid = of_alias_get_id(fdev->dev.of_node, fsi_dev_type_names[type]);
if (aid >= 0) {
/* Use the same scheme as the legacy numbers. */
int id = (aid << 2) | type;
id = ida_alloc_range(&fsi_minor_ida, id, id, GFP_KERNEL);
if (id >= 0) {
*out_index = aid;
*out_dev = fsi_base_dev + id;
return 0;
}
if (id != -ENOSPC)
return id;
}
}
return __fsi_get_new_minor(fdev->slave, type, out_dev, out_index);
}
EXPORT_SYMBOL_GPL(fsi_get_new_minor);
void fsi_free_minor(dev_t dev)
{
ida_free(&fsi_minor_ida, MINOR(dev));
}
EXPORT_SYMBOL_GPL(fsi_free_minor);
static int fsi_slave_init(struct fsi_master *master, int link, uint8_t id)
{
uint32_t cfam_id;
struct fsi_slave *slave;
uint8_t crc;
__be32 data, llmode, slbus;
int rc;
/* Currently, we only support single slaves on a link, and use the
* full 23-bit address range
*/
if (id != 0)
return -EINVAL;
rc = fsi_master_read(master, link, id, 0, &data, sizeof(data));
if (rc) {
dev_dbg(&master->dev, "can't read slave %02x:%02x %d\n",
link, id, rc);
return -ENODEV;
}
cfam_id = be32_to_cpu(data);
crc = crc4(0, cfam_id, 32);
if (crc) {
trace_fsi_slave_invalid_cfam(master, link, cfam_id);
dev_warn(&master->dev, "slave %02x:%02x invalid cfam id CRC!\n",
link, id);
return -EIO;
}
dev_dbg(&master->dev, "fsi: found chip %08x at %02x:%02x:%02x\n",
cfam_id, master->idx, link, id);
/* If we're behind a master that doesn't provide a self-running bus
* clock, put the slave into async mode
*/
if (master->flags & FSI_MASTER_FLAG_SWCLOCK) {
llmode = cpu_to_be32(FSI_LLMODE_ASYNC);
rc = fsi_master_write(master, link, id,
FSI_SLAVE_BASE + FSI_LLMODE,
&llmode, sizeof(llmode));
if (rc)
dev_warn(&master->dev,
"can't set llmode on slave:%02x:%02x %d\n",
link, id, rc);
}
/* We can communicate with a slave; create the slave device and
* register.
*/
slave = kzalloc(sizeof(*slave), GFP_KERNEL);
if (!slave)
return -ENOMEM;
dev_set_name(&slave->dev, "slave@%02x:%02x", link, id);
slave->dev.type = &cfam_type;
slave->dev.parent = &master->dev;
slave->dev.of_node = fsi_slave_find_of_node(master, link, id);
slave->dev.release = fsi_slave_release;
device_initialize(&slave->dev);
slave->cfam_id = cfam_id;
slave->master = master;
slave->link = link;
slave->id = id;
slave->size = FSI_SLAVE_SIZE_23b;
slave->t_send_delay = 16;
slave->t_echo_delay = 16;
/* Get chip ID if any */
slave->chip_id = -1;
if (slave->dev.of_node) {
uint32_t prop;
if (!of_property_read_u32(slave->dev.of_node, "chip-id", &prop))
slave->chip_id = prop;
}
slbus = cpu_to_be32(FSI_SLBUS_FORCE);
rc = fsi_master_write(master, link, id, FSI_SLAVE_BASE + FSI_SLBUS,
&slbus, sizeof(slbus));
if (rc)
dev_warn(&master->dev,
"can't set slbus on slave:%02x:%02x %d\n", link, id,
rc);
rc = fsi_slave_set_smode(slave);
if (rc) {
dev_warn(&master->dev,
"can't set smode on slave:%02x:%02x %d\n",
link, id, rc);
goto err_free;
}
/* Allocate a minor in the FSI space */
rc = __fsi_get_new_minor(slave, fsi_dev_cfam, &slave->dev.devt,
&slave->cdev_idx);
if (rc)
goto err_free;
trace_fsi_slave_init(slave);
/* Create chardev for userspace access */
cdev_init(&slave->cdev, &cfam_fops);
rc = cdev_device_add(&slave->cdev, &slave->dev);
if (rc) {
dev_err(&slave->dev, "Error %d creating slave device\n", rc);
goto err_free_ida;
}
/* Now that we have the cdev registered with the core, any fatal
* failures beyond this point will need to clean up through
* cdev_device_del(). Fortunately though, nothing past here is fatal.
*/
if (master->link_config)
master->link_config(master, link,
slave->t_send_delay,
slave->t_echo_delay);
/* Legacy raw file -> to be removed */
rc = device_create_bin_file(&slave->dev, &fsi_slave_raw_attr);
if (rc)
dev_warn(&slave->dev, "failed to create raw attr: %d\n", rc);
rc = fsi_slave_scan(slave);
if (rc)
dev_dbg(&master->dev, "failed during slave scan with: %d\n",
rc);
return 0;
err_free_ida:
fsi_free_minor(slave->dev.devt);
err_free:
of_node_put(slave->dev.of_node);
kfree(slave);
return rc;
}
/* FSI master support */
static int fsi_check_access(uint32_t addr, size_t size)
{
if (size == 4) {
if (addr & 0x3)
return -EINVAL;
} else if (size == 2) {
if (addr & 0x1)
return -EINVAL;
} else if (size != 1)
return -EINVAL;
return 0;
}
static int fsi_master_read(struct fsi_master *master, int link,
uint8_t slave_id, uint32_t addr, void *val, size_t size)
{
int rc;
trace_fsi_master_read(master, link, slave_id, addr, size);
rc = fsi_check_access(addr, size);
if (!rc)
rc = master->read(master, link, slave_id, addr, val, size);
trace_fsi_master_rw_result(master, link, slave_id, addr, size,
false, val, rc);
return rc;
}
static int fsi_master_write(struct fsi_master *master, int link,
uint8_t slave_id, uint32_t addr, const void *val, size_t size)
{
int rc;
trace_fsi_master_write(master, link, slave_id, addr, size, val);
rc = fsi_check_access(addr, size);
if (!rc)
rc = master->write(master, link, slave_id, addr, val, size);
trace_fsi_master_rw_result(master, link, slave_id, addr, size,
true, val, rc);
return rc;
}
static int fsi_master_link_disable(struct fsi_master *master, int link)
{
if (master->link_enable)
return master->link_enable(master, link, false);
return 0;
}
static int fsi_master_link_enable(struct fsi_master *master, int link)
{
if (master->link_enable)
return master->link_enable(master, link, true);
return 0;
}
/*
* Issue a break command on this link
*/
static int fsi_master_break(struct fsi_master *master, int link)
{
int rc = 0;
trace_fsi_master_break(master, link);
if (master->send_break)
rc = master->send_break(master, link);
if (master->link_config)
master->link_config(master, link, 16, 16);
return rc;
}
static int fsi_master_scan(struct fsi_master *master)
{
int link, rc;
trace_fsi_master_scan(master, true);
for (link = 0; link < master->n_links; link++) {
rc = fsi_master_link_enable(master, link);
if (rc) {
dev_dbg(&master->dev,
"enable link %d failed: %d\n", link, rc);
continue;
}
rc = fsi_master_break(master, link);
if (rc) {
fsi_master_link_disable(master, link);
dev_dbg(&master->dev,
"break to link %d failed: %d\n", link, rc);
continue;
}
rc = fsi_slave_init(master, link, 0);
if (rc)
fsi_master_link_disable(master, link);
}
return 0;
}
static int fsi_slave_remove_device(struct device *dev, void *arg)
{
device_unregister(dev);
return 0;
}
static int fsi_master_remove_slave(struct device *dev, void *arg)
{
struct fsi_slave *slave = to_fsi_slave(dev);
device_for_each_child(dev, NULL, fsi_slave_remove_device);
cdev_device_del(&slave->cdev, &slave->dev);
put_device(dev);
return 0;
}
static void fsi_master_unscan(struct fsi_master *master)
{
trace_fsi_master_scan(master, false);
device_for_each_child(&master->dev, NULL, fsi_master_remove_slave);
}
int fsi_master_rescan(struct fsi_master *master)
{
int rc;
mutex_lock(&master->scan_lock);
fsi_master_unscan(master);
rc = fsi_master_scan(master);
mutex_unlock(&master->scan_lock);
return rc;
}
EXPORT_SYMBOL_GPL(fsi_master_rescan);
static ssize_t master_rescan_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
struct fsi_master *master = to_fsi_master(dev);
int rc;
rc = fsi_master_rescan(master);
if (rc < 0)
return rc;
return count;
}
static DEVICE_ATTR(rescan, 0200, NULL, master_rescan_store);
static ssize_t master_break_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
struct fsi_master *master = to_fsi_master(dev);
fsi_master_break(master, 0);
return count;
}
static DEVICE_ATTR(break, 0200, NULL, master_break_store);
static struct attribute *master_attrs[] = {
&dev_attr_break.attr,
&dev_attr_rescan.attr,
NULL
};
ATTRIBUTE_GROUPS(master);
static struct class fsi_master_class = {
.name = "fsi-master",
.dev_groups = master_groups,
};
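/*
* As an illustration (paths are an assumption based on the class and device
* names above, not taken from the original source): a registered master is
* expected to appear as /sys/class/fsi-master/fsi0/ with "rescan" and "break"
* attributes; writing any value to "rescan" re-probes all links and writing
* to "break" issues a break command on link 0, e.g.
*   echo 1 > /sys/class/fsi-master/fsi0/rescan
*/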
int fsi_master_register(struct fsi_master *master)
{
int rc;
struct device_node *np;
mutex_init(&master->scan_lock);
/* Alloc the requested index if it's non-zero */
if (master->idx) {
master->idx = ida_alloc_range(&master_ida, master->idx,
master->idx, GFP_KERNEL);
} else {
master->idx = ida_alloc(&master_ida, GFP_KERNEL);
}
if (master->idx < 0)
return master->idx;
if (!dev_name(&master->dev))
dev_set_name(&master->dev, "fsi%d", master->idx);
master->dev.class = &fsi_master_class;
mutex_lock(&master->scan_lock);
rc = device_register(&master->dev);
if (rc) {
ida_free(&master_ida, master->idx);
goto out;
}
np = dev_of_node(&master->dev);
if (!of_property_read_bool(np, "no-scan-on-init")) {
fsi_master_scan(master);
}
out:
mutex_unlock(&master->scan_lock);
return rc;
}
EXPORT_SYMBOL_GPL(fsi_master_register);
void fsi_master_unregister(struct fsi_master *master)
{
int idx = master->idx;
trace_fsi_master_unregister(master);
mutex_lock(&master->scan_lock);
fsi_master_unscan(master);
master->n_links = 0;
mutex_unlock(&master->scan_lock);
device_unregister(&master->dev);
ida_free(&master_ida, idx);
}
EXPORT_SYMBOL_GPL(fsi_master_unregister);
/* FSI core & Linux bus type definitions */
static int fsi_bus_match(struct device *dev, struct device_driver *drv)
{
struct fsi_device *fsi_dev = to_fsi_dev(dev);
struct fsi_driver *fsi_drv = to_fsi_drv(drv);
const struct fsi_device_id *id;
if (!fsi_drv->id_table)
return 0;
for (id = fsi_drv->id_table; id->engine_type; id++) {
if (id->engine_type != fsi_dev->engine_type)
continue;
if (id->version == FSI_VERSION_ANY ||
id->version == fsi_dev->version) {
if (drv->of_match_table) {
if (of_driver_match_device(dev, drv))
return 1;
} else {
return 1;
}
}
}
return 0;
}
int fsi_driver_register(struct fsi_driver *fsi_drv)
{
if (!fsi_drv)
return -EINVAL;
if (!fsi_drv->id_table)
return -EINVAL;
return driver_register(&fsi_drv->drv);
}
EXPORT_SYMBOL_GPL(fsi_driver_register);
void fsi_driver_unregister(struct fsi_driver *fsi_drv)
{
driver_unregister(&fsi_drv->drv);
}
EXPORT_SYMBOL_GPL(fsi_driver_unregister);
struct bus_type fsi_bus_type = {
.name = "fsi",
.match = fsi_bus_match,
};
EXPORT_SYMBOL_GPL(fsi_bus_type);
static int __init fsi_init(void)
{
int rc;
rc = alloc_chrdev_region(&fsi_base_dev, 0, FSI_CHAR_MAX_DEVICES, "fsi");
if (rc)
return rc;
rc = bus_register(&fsi_bus_type);
if (rc)
goto fail_bus;
rc = class_register(&fsi_master_class);
if (rc)
goto fail_class;
return 0;
fail_class:
bus_unregister(&fsi_bus_type);
fail_bus:
unregister_chrdev_region(fsi_base_dev, FSI_CHAR_MAX_DEVICES);
return rc;
}
postcore_initcall(fsi_init);
static void fsi_exit(void)
{
class_unregister(&fsi_master_class);
bus_unregister(&fsi_bus_type);
unregister_chrdev_region(fsi_base_dev, FSI_CHAR_MAX_DEVICES);
ida_destroy(&fsi_minor_ida);
}
module_exit(fsi_exit);
module_param(discard_errors, int, 0664);
MODULE_LICENSE("GPL");
MODULE_PARM_DESC(discard_errors, "Don't invoke error handling on bus accesses");
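/*
* As an illustration (the module name is an assumption, not from the source):
* with permissions 0664 the parameter should be writable at runtime by root,
* e.g.
*   echo 1 > /sys/module/fsi_core/parameters/discard_errors
*/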
| linux-master | drivers/fsi/fsi-core.c |
// SPDX-License-Identifier: GPL-2.0-or-later
// Copyright (C) IBM Corporation 2018
// FSI master driver for AST2600
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/fsi.h>
#include <linux/io.h>
#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/slab.h>
#include <linux/iopoll.h>
#include <linux/gpio/consumer.h>
#include "fsi-master.h"
struct fsi_master_aspeed {
struct fsi_master master;
struct mutex lock; /* protect HW access */
struct device *dev;
void __iomem *base;
struct clk *clk;
struct gpio_desc *cfam_reset_gpio;
};
#define to_fsi_master_aspeed(m) \
container_of(m, struct fsi_master_aspeed, master)
/* Control register (size 0x400) */
static const u32 ctrl_base = 0x80000000;
static const u32 fsi_base = 0xa0000000;
#define OPB_FSI_VER 0x00
#define OPB_TRIGGER 0x04
#define OPB_CTRL_BASE 0x08
#define OPB_FSI_BASE 0x0c
#define OPB_CLK_SYNC 0x3c
#define OPB_IRQ_CLEAR 0x40
#define OPB_IRQ_MASK 0x44
#define OPB_IRQ_STATUS 0x48
#define OPB0_SELECT 0x10
#define OPB0_RW 0x14
#define OPB0_XFER_SIZE 0x18
#define OPB0_FSI_ADDR 0x1c
#define OPB0_FSI_DATA_W 0x20
#define OPB0_STATUS 0x80
#define OPB0_FSI_DATA_R 0x84
#define OPB0_WRITE_ORDER1 0x4c
#define OPB0_WRITE_ORDER2 0x50
#define OPB1_WRITE_ORDER1 0x54
#define OPB1_WRITE_ORDER2 0x58
#define OPB0_READ_ORDER1 0x5c
#define OPB1_READ_ORDER2 0x60
#define OPB_RETRY_COUNTER 0x64
/* OPBn_STATUS */
#define STATUS_HALFWORD_ACK BIT(0)
#define STATUS_FULLWORD_ACK BIT(1)
#define STATUS_ERR_ACK BIT(2)
#define STATUS_RETRY BIT(3)
#define STATUS_TIMEOUT BIT(4)
/* OPB_IRQ_MASK */
#define OPB1_XFER_ACK_EN BIT(17)
#define OPB0_XFER_ACK_EN BIT(16)
/* OPB_RW */
#define CMD_READ BIT(0)
#define CMD_WRITE 0
/* OPBx_XFER_SIZE */
#define XFER_FULLWORD (BIT(1) | BIT(0))
#define XFER_HALFWORD (BIT(0))
#define XFER_BYTE (0)
#define CREATE_TRACE_POINTS
#include <trace/events/fsi_master_aspeed.h>
#define FSI_LINK_ENABLE_SETUP_TIME 10 /* in mS */
/* Run the bus at maximum speed by default */
#define FSI_DIVISOR_DEFAULT 1
#define FSI_DIVISOR_CABLED 2
static u16 aspeed_fsi_divisor = FSI_DIVISOR_DEFAULT;
module_param_named(bus_div, aspeed_fsi_divisor, ushort, 0);
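/*
* As an illustration (module and parameter naming here are an assumption based
* on the file and the module_param_named() above): the divisor can be set at
* load time, e.g. "modprobe fsi_master_aspeed bus_div=2" or
* "fsi_master_aspeed.bus_div=2" on the kernel command line; with permissions
* of 0 it is not changeable via sysfs afterwards.
*/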
#define OPB_POLL_TIMEOUT 500
static int __opb_write(struct fsi_master_aspeed *aspeed, u32 addr,
u32 val, u32 transfer_size)
{
void __iomem *base = aspeed->base;
u32 reg, status;
int ret;
/*
* The ordering of these writes up until the trigger
* write does not matter, so use writel_relaxed.
*/
writel_relaxed(CMD_WRITE, base + OPB0_RW);
writel_relaxed(transfer_size, base + OPB0_XFER_SIZE);
writel_relaxed(addr, base + OPB0_FSI_ADDR);
writel_relaxed(val, base + OPB0_FSI_DATA_W);
writel_relaxed(0x1, base + OPB_IRQ_CLEAR);
writel(0x1, base + OPB_TRIGGER);
ret = readl_poll_timeout(base + OPB_IRQ_STATUS, reg,
(reg & OPB0_XFER_ACK_EN) != 0,
0, OPB_POLL_TIMEOUT);
status = readl(base + OPB0_STATUS);
trace_fsi_master_aspeed_opb_write(addr, val, transfer_size, status, reg);
/* Return error when poll timed out */
if (ret)
return ret;
/* Command failed, master will reset */
if (status & STATUS_ERR_ACK)
return -EIO;
return 0;
}
static int opb_writeb(struct fsi_master_aspeed *aspeed, u32 addr, u8 val)
{
return __opb_write(aspeed, addr, val, XFER_BYTE);
}
static int opb_writew(struct fsi_master_aspeed *aspeed, u32 addr, __be16 val)
{
return __opb_write(aspeed, addr, (__force u16)val, XFER_HALFWORD);
}
static int opb_writel(struct fsi_master_aspeed *aspeed, u32 addr, __be32 val)
{
return __opb_write(aspeed, addr, (__force u32)val, XFER_FULLWORD);
}
static int __opb_read(struct fsi_master_aspeed *aspeed, uint32_t addr,
u32 transfer_size, void *out)
{
void __iomem *base = aspeed->base;
u32 result, reg;
int status, ret;
/*
* The ordering of these writes up until the trigger
* write does not matter, so use writel_relaxed.
*/
writel_relaxed(CMD_READ, base + OPB0_RW);
writel_relaxed(transfer_size, base + OPB0_XFER_SIZE);
writel_relaxed(addr, base + OPB0_FSI_ADDR);
writel_relaxed(0x1, base + OPB_IRQ_CLEAR);
writel(0x1, base + OPB_TRIGGER);
ret = readl_poll_timeout(base + OPB_IRQ_STATUS, reg,
(reg & OPB0_XFER_ACK_EN) != 0,
0, OPB_POLL_TIMEOUT);
status = readl(base + OPB0_STATUS);
result = readl(base + OPB0_FSI_DATA_R);
trace_fsi_master_aspeed_opb_read(addr, transfer_size, result,
readl(base + OPB0_STATUS),
reg);
/* Return error when poll timed out */
if (ret)
return ret;
/* Command failed, master will reset */
if (status & STATUS_ERR_ACK)
return -EIO;
if (out) {
switch (transfer_size) {
case XFER_BYTE:
*(u8 *)out = result;
break;
case XFER_HALFWORD:
*(u16 *)out = result;
break;
case XFER_FULLWORD:
*(u32 *)out = result;
break;
default:
return -EINVAL;
}
}
return 0;
}
static int opb_readl(struct fsi_master_aspeed *aspeed, uint32_t addr, __be32 *out)
{
return __opb_read(aspeed, addr, XFER_FULLWORD, out);
}
static int opb_readw(struct fsi_master_aspeed *aspeed, uint32_t addr, __be16 *out)
{
return __opb_read(aspeed, addr, XFER_HALFWORD, (void *)out);
}
static int opb_readb(struct fsi_master_aspeed *aspeed, uint32_t addr, u8 *out)
{
return __opb_read(aspeed, addr, XFER_BYTE, (void *)out);
}
static int check_errors(struct fsi_master_aspeed *aspeed, int err)
{
int ret;
if (trace_fsi_master_aspeed_opb_error_enabled()) {
__be32 mresp0, mstap0, mesrb0;
opb_readl(aspeed, ctrl_base + FSI_MRESP0, &mresp0);
opb_readl(aspeed, ctrl_base + FSI_MSTAP0, &mstap0);
opb_readl(aspeed, ctrl_base + FSI_MESRB0, &mesrb0);
trace_fsi_master_aspeed_opb_error(
be32_to_cpu(mresp0),
be32_to_cpu(mstap0),
be32_to_cpu(mesrb0));
}
if (err == -EIO) {
/* Check MAEB (0x70) ? */
/* Then clear errors in master */
ret = opb_writel(aspeed, ctrl_base + FSI_MRESP0,
cpu_to_be32(FSI_MRESP_RST_ALL_MASTER));
if (ret) {
/* TODO: log? return different code? */
return ret;
}
/* TODO: confirm that 0x70 was okay */
}
/* This will pass through timeout errors */
return err;
}
static int aspeed_master_read(struct fsi_master *master, int link,
uint8_t id, uint32_t addr, void *val, size_t size)
{
struct fsi_master_aspeed *aspeed = to_fsi_master_aspeed(master);
int ret;
if (id > 0x3)
return -EINVAL;
addr |= id << 21;
addr += link * FSI_HUB_LINK_SIZE;
mutex_lock(&aspeed->lock);
switch (size) {
case 1:
ret = opb_readb(aspeed, fsi_base + addr, val);
break;
case 2:
ret = opb_readw(aspeed, fsi_base + addr, val);
break;
case 4:
ret = opb_readl(aspeed, fsi_base + addr, val);
break;
default:
ret = -EINVAL;
goto done;
}
ret = check_errors(aspeed, ret);
done:
mutex_unlock(&aspeed->lock);
return ret;
}
static int aspeed_master_write(struct fsi_master *master, int link,
uint8_t id, uint32_t addr, const void *val, size_t size)
{
struct fsi_master_aspeed *aspeed = to_fsi_master_aspeed(master);
int ret;
if (id > 0x3)
return -EINVAL;
addr |= id << 21;
addr += link * FSI_HUB_LINK_SIZE;
mutex_lock(&aspeed->lock);
switch (size) {
case 1:
ret = opb_writeb(aspeed, fsi_base + addr, *(u8 *)val);
break;
case 2:
ret = opb_writew(aspeed, fsi_base + addr, *(__be16 *)val);
break;
case 4:
ret = opb_writel(aspeed, fsi_base + addr, *(__be32 *)val);
break;
default:
ret = -EINVAL;
goto done;
}
ret = check_errors(aspeed, ret);
done:
mutex_unlock(&aspeed->lock);
return ret;
}
static int aspeed_master_link_enable(struct fsi_master *master, int link,
bool enable)
{
struct fsi_master_aspeed *aspeed = to_fsi_master_aspeed(master);
int idx, bit, ret;
__be32 reg;
idx = link / 32;
bit = link % 32;
reg = cpu_to_be32(0x80000000 >> bit);
mutex_lock(&aspeed->lock);
if (!enable) {
ret = opb_writel(aspeed, ctrl_base + FSI_MCENP0 + (4 * idx), reg);
goto done;
}
ret = opb_writel(aspeed, ctrl_base + FSI_MSENP0 + (4 * idx), reg);
if (ret)
goto done;
mdelay(FSI_LINK_ENABLE_SETUP_TIME);
done:
mutex_unlock(&aspeed->lock);
return ret;
}
static int aspeed_master_term(struct fsi_master *master, int link, uint8_t id)
{
uint32_t addr;
__be32 cmd;
addr = 0x4;
cmd = cpu_to_be32(0xecc00000);
return aspeed_master_write(master, link, id, addr, &cmd, 4);
}
static int aspeed_master_break(struct fsi_master *master, int link)
{
uint32_t addr;
__be32 cmd;
addr = 0x0;
cmd = cpu_to_be32(0xc0de0000);
return aspeed_master_write(master, link, 0, addr, &cmd, 4);
}
static void aspeed_master_release(struct device *dev)
{
struct fsi_master_aspeed *aspeed =
to_fsi_master_aspeed(to_fsi_master(dev));
kfree(aspeed);
}
/* mmode encoders */
static inline u32 fsi_mmode_crs0(u32 x)
{
return (x & FSI_MMODE_CRS0MASK) << FSI_MMODE_CRS0SHFT;
}
static inline u32 fsi_mmode_crs1(u32 x)
{
return (x & FSI_MMODE_CRS1MASK) << FSI_MMODE_CRS1SHFT;
}
static int aspeed_master_init(struct fsi_master_aspeed *aspeed)
{
__be32 reg;
reg = cpu_to_be32(FSI_MRESP_RST_ALL_MASTER | FSI_MRESP_RST_ALL_LINK
| FSI_MRESP_RST_MCR | FSI_MRESP_RST_PYE);
opb_writel(aspeed, ctrl_base + FSI_MRESP0, reg);
/* Initialize the MFSI (hub master) engine */
reg = cpu_to_be32(FSI_MRESP_RST_ALL_MASTER | FSI_MRESP_RST_ALL_LINK
| FSI_MRESP_RST_MCR | FSI_MRESP_RST_PYE);
opb_writel(aspeed, ctrl_base + FSI_MRESP0, reg);
reg = cpu_to_be32(FSI_MECTRL_EOAE | FSI_MECTRL_P8_AUTO_TERM);
opb_writel(aspeed, ctrl_base + FSI_MECTRL, reg);
reg = cpu_to_be32(FSI_MMODE_ECRC | FSI_MMODE_EPC | FSI_MMODE_RELA
| fsi_mmode_crs0(aspeed_fsi_divisor)
| fsi_mmode_crs1(aspeed_fsi_divisor)
| FSI_MMODE_P8_TO_LSB);
dev_info(aspeed->dev, "mmode set to %08x (divisor %d)\n",
be32_to_cpu(reg), aspeed_fsi_divisor);
opb_writel(aspeed, ctrl_base + FSI_MMODE, reg);
reg = cpu_to_be32(0xffff0000);
opb_writel(aspeed, ctrl_base + FSI_MDLYR, reg);
reg = cpu_to_be32(~0);
opb_writel(aspeed, ctrl_base + FSI_MSENP0, reg);
/* Leave enabled long enough for master logic to set up */
mdelay(FSI_LINK_ENABLE_SETUP_TIME);
opb_writel(aspeed, ctrl_base + FSI_MCENP0, reg);
opb_readl(aspeed, ctrl_base + FSI_MAEB, NULL);
reg = cpu_to_be32(FSI_MRESP_RST_ALL_MASTER | FSI_MRESP_RST_ALL_LINK);
opb_writel(aspeed, ctrl_base + FSI_MRESP0, reg);
opb_readl(aspeed, ctrl_base + FSI_MLEVP0, NULL);
/* Reset the master bridge */
reg = cpu_to_be32(FSI_MRESB_RST_GEN);
opb_writel(aspeed, ctrl_base + FSI_MRESB0, reg);
reg = cpu_to_be32(FSI_MRESB_RST_ERR);
opb_writel(aspeed, ctrl_base + FSI_MRESB0, reg);
return 0;
}
static ssize_t cfam_reset_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
struct fsi_master_aspeed *aspeed = dev_get_drvdata(dev);
trace_fsi_master_aspeed_cfam_reset(true);
mutex_lock(&aspeed->lock);
gpiod_set_value(aspeed->cfam_reset_gpio, 1);
usleep_range(900, 1000);
gpiod_set_value(aspeed->cfam_reset_gpio, 0);
usleep_range(900, 1000);
opb_writel(aspeed, ctrl_base + FSI_MRESP0, cpu_to_be32(FSI_MRESP_RST_ALL_MASTER));
mutex_unlock(&aspeed->lock);
trace_fsi_master_aspeed_cfam_reset(false);
return count;
}
static DEVICE_ATTR(cfam_reset, 0200, NULL, cfam_reset_store);
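/*
* As an illustration (the exact sysfs path is an assumption and depends on the
* device tree): when the optional cfam-reset GPIO is present, a reset pulse
* can be triggered from userspace with something like
*   echo 1 > /sys/bus/platform/devices/<fsi-master>/cfam_reset
*/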
static int setup_cfam_reset(struct fsi_master_aspeed *aspeed)
{
struct device *dev = aspeed->dev;
struct gpio_desc *gpio;
int rc;
gpio = devm_gpiod_get_optional(dev, "cfam-reset", GPIOD_OUT_LOW);
if (IS_ERR(gpio))
return PTR_ERR(gpio);
if (!gpio)
return 0;
aspeed->cfam_reset_gpio = gpio;
rc = device_create_file(dev, &dev_attr_cfam_reset);
if (rc) {
devm_gpiod_put(dev, gpio);
return rc;
}
return 0;
}
static int tacoma_cabled_fsi_fixup(struct device *dev)
{
struct gpio_desc *routing_gpio, *mux_gpio;
int gpio;
/*
* The routing GPIO is a jumper indicating we should mux for the
* externally connected FSI cable.
*/
routing_gpio = devm_gpiod_get_optional(dev, "fsi-routing",
GPIOD_IN | GPIOD_FLAGS_BIT_NONEXCLUSIVE);
if (IS_ERR(routing_gpio))
return PTR_ERR(routing_gpio);
if (!routing_gpio)
return 0;
mux_gpio = devm_gpiod_get_optional(dev, "fsi-mux", GPIOD_ASIS);
if (IS_ERR(mux_gpio))
return PTR_ERR(mux_gpio);
if (!mux_gpio)
return 0;
gpio = gpiod_get_value(routing_gpio);
if (gpio < 0)
return gpio;
/* If the routing GPIO is high we should set the mux to low. */
if (gpio) {
/*
* Cable signal integrity means we should run the bus
* slightly slower. Do not override if a kernel param
* has already overridden.
*/
if (aspeed_fsi_divisor == FSI_DIVISOR_DEFAULT)
aspeed_fsi_divisor = FSI_DIVISOR_CABLED;
gpiod_direction_output(mux_gpio, 0);
dev_info(dev, "FSI configured for external cable\n");
} else {
gpiod_direction_output(mux_gpio, 1);
}
devm_gpiod_put(dev, routing_gpio);
return 0;
}
static int fsi_master_aspeed_probe(struct platform_device *pdev)
{
struct fsi_master_aspeed *aspeed;
int rc, links, reg;
__be32 raw;
rc = tacoma_cabled_fsi_fixup(&pdev->dev);
if (rc) {
dev_err(&pdev->dev, "Tacoma FSI cable fixup failed\n");
return rc;
}
aspeed = kzalloc(sizeof(*aspeed), GFP_KERNEL);
if (!aspeed)
return -ENOMEM;
aspeed->dev = &pdev->dev;
aspeed->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(aspeed->base)) {
rc = PTR_ERR(aspeed->base);
goto err_free_aspeed;
}
aspeed->clk = devm_clk_get(aspeed->dev, NULL);
if (IS_ERR(aspeed->clk)) {
dev_err(aspeed->dev, "couldn't get clock\n");
rc = PTR_ERR(aspeed->clk);
goto err_free_aspeed;
}
rc = clk_prepare_enable(aspeed->clk);
if (rc) {
dev_err(aspeed->dev, "couldn't enable clock\n");
goto err_free_aspeed;
}
rc = setup_cfam_reset(aspeed);
if (rc) {
dev_err(&pdev->dev, "CFAM reset GPIO setup failed\n");
}
writel(0x1, aspeed->base + OPB_CLK_SYNC);
writel(OPB1_XFER_ACK_EN | OPB0_XFER_ACK_EN,
aspeed->base + OPB_IRQ_MASK);
/* TODO: determine an appropriate value */
writel(0x10, aspeed->base + OPB_RETRY_COUNTER);
writel(ctrl_base, aspeed->base + OPB_CTRL_BASE);
writel(fsi_base, aspeed->base + OPB_FSI_BASE);
/* Set read data order */
writel(0x00030b1b, aspeed->base + OPB0_READ_ORDER1);
/* Set write data order */
writel(0x0011101b, aspeed->base + OPB0_WRITE_ORDER1);
writel(0x0c330f3f, aspeed->base + OPB0_WRITE_ORDER2);
/*
* Select OPB0 for all operations.
* Will need to be reworked when enabling DMA or anything that uses
* OPB1.
*/
writel(0x1, aspeed->base + OPB0_SELECT);
rc = opb_readl(aspeed, ctrl_base + FSI_MVER, &raw);
if (rc) {
dev_err(&pdev->dev, "failed to read hub version\n");
goto err_release;
}
reg = be32_to_cpu(raw);
links = (reg >> 8) & 0xff;
dev_info(&pdev->dev, "hub version %08x (%d links)\n", reg, links);
aspeed->master.dev.parent = &pdev->dev;
aspeed->master.dev.release = aspeed_master_release;
aspeed->master.dev.of_node = of_node_get(dev_of_node(&pdev->dev));
aspeed->master.n_links = links;
aspeed->master.read = aspeed_master_read;
aspeed->master.write = aspeed_master_write;
aspeed->master.send_break = aspeed_master_break;
aspeed->master.term = aspeed_master_term;
aspeed->master.link_enable = aspeed_master_link_enable;
dev_set_drvdata(&pdev->dev, aspeed);
mutex_init(&aspeed->lock);
aspeed_master_init(aspeed);
rc = fsi_master_register(&aspeed->master);
if (rc)
goto err_release;
/* At this point, fsi_master_register performs the device_initialize(),
* and holds the sole reference on master.dev. This means the device
* will be freed (via ->release) during any subsequent call to
* fsi_master_unregister. We add our own reference to it here, so we
* can perform cleanup (in _remove()) without it being freed before
* we're ready.
*/
get_device(&aspeed->master.dev);
return 0;
err_release:
clk_disable_unprepare(aspeed->clk);
err_free_aspeed:
kfree(aspeed);
return rc;
}
static int fsi_master_aspeed_remove(struct platform_device *pdev)
{
struct fsi_master_aspeed *aspeed = platform_get_drvdata(pdev);
fsi_master_unregister(&aspeed->master);
clk_disable_unprepare(aspeed->clk);
return 0;
}
static const struct of_device_id fsi_master_aspeed_match[] = {
{ .compatible = "aspeed,ast2600-fsi-master" },
{ },
};
MODULE_DEVICE_TABLE(of, fsi_master_aspeed_match);
static struct platform_driver fsi_master_aspeed_driver = {
.driver = {
.name = "fsi-master-aspeed",
.of_match_table = fsi_master_aspeed_match,
},
.probe = fsi_master_aspeed_probe,
.remove = fsi_master_aspeed_remove,
};
module_platform_driver(fsi_master_aspeed_driver);
MODULE_LICENSE("GPL");
| linux-master | drivers/fsi/fsi-master-aspeed.c |
// SPDX-License-Identifier: GPL-2.0+
// Copyright 2018 IBM Corp
/*
* A FSI master controller, using a simple GPIO bit-banging interface
*/
#include <linux/crc4.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/fsi.h>
#include <linux/gpio/consumer.h>
#include <linux/io.h>
#include <linux/irqflags.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/regmap.h>
#include <linux/firmware.h>
#include <linux/gpio/aspeed.h>
#include <linux/mfd/syscon.h>
#include <linux/of_address.h>
#include <linux/genalloc.h>
#include "fsi-master.h"
#include "cf-fsi-fw.h"
#define FW_FILE_NAME "cf-fsi-fw.bin"
/* Common SCU based coprocessor control registers */
#define SCU_COPRO_CTRL 0x100
#define SCU_COPRO_RESET 0x00000002
#define SCU_COPRO_CLK_EN 0x00000001
/* AST2500 specific ones */
#define SCU_2500_COPRO_SEG0 0x104
#define SCU_2500_COPRO_SEG1 0x108
#define SCU_2500_COPRO_SEG2 0x10c
#define SCU_2500_COPRO_SEG3 0x110
#define SCU_2500_COPRO_SEG4 0x114
#define SCU_2500_COPRO_SEG5 0x118
#define SCU_2500_COPRO_SEG6 0x11c
#define SCU_2500_COPRO_SEG7 0x120
#define SCU_2500_COPRO_SEG8 0x124
#define SCU_2500_COPRO_SEG_SWAP 0x00000001
#define SCU_2500_COPRO_CACHE_CTL 0x128
#define SCU_2500_COPRO_CACHE_EN 0x00000001
#define SCU_2500_COPRO_SEG0_CACHE_EN 0x00000002
#define SCU_2500_COPRO_SEG1_CACHE_EN 0x00000004
#define SCU_2500_COPRO_SEG2_CACHE_EN 0x00000008
#define SCU_2500_COPRO_SEG3_CACHE_EN 0x00000010
#define SCU_2500_COPRO_SEG4_CACHE_EN 0x00000020
#define SCU_2500_COPRO_SEG5_CACHE_EN 0x00000040
#define SCU_2500_COPRO_SEG6_CACHE_EN 0x00000080
#define SCU_2500_COPRO_SEG7_CACHE_EN 0x00000100
#define SCU_2500_COPRO_SEG8_CACHE_EN 0x00000200
#define SCU_2400_COPRO_SEG0 0x104
#define SCU_2400_COPRO_SEG2 0x108
#define SCU_2400_COPRO_SEG4 0x10c
#define SCU_2400_COPRO_SEG6 0x110
#define SCU_2400_COPRO_SEG8 0x114
#define SCU_2400_COPRO_SEG_SWAP 0x80000000
#define SCU_2400_COPRO_CACHE_CTL 0x118
#define SCU_2400_COPRO_CACHE_EN 0x00000001
#define SCU_2400_COPRO_SEG0_CACHE_EN 0x00000002
#define SCU_2400_COPRO_SEG2_CACHE_EN 0x00000004
#define SCU_2400_COPRO_SEG4_CACHE_EN 0x00000008
#define SCU_2400_COPRO_SEG6_CACHE_EN 0x00000010
#define SCU_2400_COPRO_SEG8_CACHE_EN 0x00000020
/* CVIC registers */
#define CVIC_EN_REG 0x10
#define CVIC_TRIG_REG 0x18
/*
* System register base address (needed for configuring the
* coldfire maps)
*/
#define SYSREG_BASE 0x1e600000
/* Amount of SRAM required */
#define SRAM_SIZE 0x1000
#define LAST_ADDR_INVALID 0x1
struct fsi_master_acf {
struct fsi_master master;
struct device *dev;
struct regmap *scu;
struct mutex lock; /* mutex for command ordering */
struct gpio_desc *gpio_clk;
struct gpio_desc *gpio_data;
struct gpio_desc *gpio_trans; /* Voltage translator */
struct gpio_desc *gpio_enable; /* FSI enable */
struct gpio_desc *gpio_mux; /* Mux control */
uint16_t gpio_clk_vreg;
uint16_t gpio_clk_dreg;
uint16_t gpio_dat_vreg;
uint16_t gpio_dat_dreg;
uint16_t gpio_tra_vreg;
uint16_t gpio_tra_dreg;
uint8_t gpio_clk_bit;
uint8_t gpio_dat_bit;
uint8_t gpio_tra_bit;
uint32_t cf_mem_addr;
size_t cf_mem_size;
void __iomem *cf_mem;
void __iomem *cvic;
struct gen_pool *sram_pool;
void __iomem *sram;
bool is_ast2500;
bool external_mode;
bool trace_enabled;
uint32_t last_addr;
uint8_t t_send_delay;
uint8_t t_echo_delay;
uint32_t cvic_sw_irq;
};
#define to_fsi_master_acf(m) container_of(m, struct fsi_master_acf, master)
struct fsi_msg {
uint64_t msg;
uint8_t bits;
};
#define CREATE_TRACE_POINTS
#include <trace/events/fsi_master_ast_cf.h>
static void msg_push_bits(struct fsi_msg *msg, uint64_t data, int bits)
{
msg->msg <<= bits;
msg->msg |= data & ((1ull << bits) - 1);
msg->bits += bits;
}
static void msg_push_crc(struct fsi_msg *msg)
{
uint8_t crc;
int top;
top = msg->bits & 0x3;
/* start bit, and any non-aligned top bits */
crc = crc4(0, 1 << top | msg->msg >> (msg->bits - top), top + 1);
/* aligned bits */
crc = crc4(crc, msg->msg, msg->bits - top);
msg_push_bits(msg, crc, 4);
}
static void msg_finish_cmd(struct fsi_msg *cmd)
{
/* Left align message */
cmd->msg <<= (64 - cmd->bits);
}
static bool check_same_address(struct fsi_master_acf *master, int id,
uint32_t addr)
{
/* this will also handle LAST_ADDR_INVALID */
return master->last_addr == (((id & 0x3) << 21) | (addr & ~0x3));
}
static bool check_relative_address(struct fsi_master_acf *master, int id,
uint32_t addr, uint32_t *rel_addrp)
{
uint32_t last_addr = master->last_addr;
int32_t rel_addr;
if (last_addr == LAST_ADDR_INVALID)
return false;
/* We may be in 23-bit addressing mode, which uses the id as the
* top two address bits. So, if we're referencing a different ID,
* use absolute addresses.
*/
if (((last_addr >> 21) & 0x3) != id)
return false;
/* remove the top two bits from any 23-bit addressing */
last_addr &= (1 << 21) - 1;
/* We know that the addresses are limited to 21 bits, so this won't
* overflow the signed rel_addr */
rel_addr = addr - last_addr;
if (rel_addr > 255 || rel_addr < -256)
return false;
*rel_addrp = (uint32_t)rel_addr;
return true;
}
static void last_address_update(struct fsi_master_acf *master,
int id, bool valid, uint32_t addr)
{
if (!valid)
master->last_addr = LAST_ADDR_INVALID;
else
master->last_addr = ((id & 0x3) << 21) | (addr & ~0x3);
}
/*
* Encode an Absolute/Relative/Same Address command
*/
static void build_ar_command(struct fsi_master_acf *master,
struct fsi_msg *cmd, uint8_t id,
uint32_t addr, size_t size,
const void *data)
{
int i, addr_bits, opcode_bits;
bool write = !!data;
uint8_t ds, opcode;
uint32_t rel_addr;
cmd->bits = 0;
cmd->msg = 0;
/* we have 21 bits of address max */
addr &= ((1 << 21) - 1);
/* cmd opcodes are variable length - SAME_AR is only two bits */
opcode_bits = 3;
if (check_same_address(master, id, addr)) {
/* we still address the byte offset within the word */
addr_bits = 2;
opcode_bits = 2;
opcode = FSI_CMD_SAME_AR;
trace_fsi_master_acf_cmd_same_addr(master);
} else if (check_relative_address(master, id, addr, &rel_addr)) {
/* 8 bits plus sign */
addr_bits = 9;
addr = rel_addr;
opcode = FSI_CMD_REL_AR;
trace_fsi_master_acf_cmd_rel_addr(master, rel_addr);
} else {
addr_bits = 21;
opcode = FSI_CMD_ABS_AR;
trace_fsi_master_acf_cmd_abs_addr(master, addr);
}
/*
* The read/write size is encoded in the lower bits of the address
* (as it must be naturally-aligned), and the following ds bit.
*
* size addr:1 addr:0 ds
* 1 x x 0
* 2 x 0 1
* 4 0 1 1
*
*/
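/*
* Worked example (illustrative, not from the original source): a 4-byte
* access to address 0x1004 keeps the aligned address (0x1004 & ~3), then
* sets bit 0, so the encoded address ends in ...01 with ds = 1, matching
* the "size 4" row of the table above.
*/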
ds = size > 1 ? 1 : 0;
addr &= ~(size - 1);
if (size == 4)
addr |= 1;
msg_push_bits(cmd, id, 2);
msg_push_bits(cmd, opcode, opcode_bits);
msg_push_bits(cmd, write ? 0 : 1, 1);
msg_push_bits(cmd, addr, addr_bits);
msg_push_bits(cmd, ds, 1);
for (i = 0; write && i < size; i++)
msg_push_bits(cmd, ((uint8_t *)data)[i], 8);
msg_push_crc(cmd);
msg_finish_cmd(cmd);
}
static void build_dpoll_command(struct fsi_msg *cmd, uint8_t slave_id)
{
cmd->bits = 0;
cmd->msg = 0;
msg_push_bits(cmd, slave_id, 2);
msg_push_bits(cmd, FSI_CMD_DPOLL, 3);
msg_push_crc(cmd);
msg_finish_cmd(cmd);
}
static void build_epoll_command(struct fsi_msg *cmd, uint8_t slave_id)
{
cmd->bits = 0;
cmd->msg = 0;
msg_push_bits(cmd, slave_id, 2);
msg_push_bits(cmd, FSI_CMD_EPOLL, 3);
msg_push_crc(cmd);
msg_finish_cmd(cmd);
}
static void build_term_command(struct fsi_msg *cmd, uint8_t slave_id)
{
cmd->bits = 0;
cmd->msg = 0;
msg_push_bits(cmd, slave_id, 2);
msg_push_bits(cmd, FSI_CMD_TERM, 6);
msg_push_crc(cmd);
msg_finish_cmd(cmd);
}
static int do_copro_command(struct fsi_master_acf *master, uint32_t op)
{
uint32_t timeout = 10000000;
uint8_t stat;
trace_fsi_master_acf_copro_command(master, op);
/* Send command */
iowrite32be(op, master->sram + CMD_STAT_REG);
/* Ring doorbell if any */
if (master->cvic)
iowrite32(0x2, master->cvic + CVIC_TRIG_REG);
/* Wait for status to indicate completion (or error) */
do {
if (timeout-- == 0) {
dev_warn(master->dev,
"Timeout waiting for coprocessor completion\n");
return -ETIMEDOUT;
}
stat = ioread8(master->sram + CMD_STAT_REG);
} while(stat < STAT_COMPLETE || stat == 0xff);
if (stat == STAT_COMPLETE)
return 0;
switch(stat) {
case STAT_ERR_INVAL_CMD:
return -EINVAL;
case STAT_ERR_INVAL_IRQ:
return -EIO;
case STAT_ERR_MTOE:
return -ESHUTDOWN;
}
return -ENXIO;
}
static int clock_zeros(struct fsi_master_acf *master, int count)
{
while (count) {
int rc, lcnt = min(count, 255);
rc = do_copro_command(master,
CMD_IDLE_CLOCKS | (lcnt << CMD_REG_CLEN_SHIFT));
if (rc)
return rc;
count -= lcnt;
}
return 0;
}
static int send_request(struct fsi_master_acf *master, struct fsi_msg *cmd,
unsigned int resp_bits)
{
uint32_t op;
trace_fsi_master_acf_send_request(master, cmd, resp_bits);
/* Store message into SRAM */
iowrite32be((cmd->msg >> 32), master->sram + CMD_DATA);
iowrite32be((cmd->msg & 0xffffffff), master->sram + CMD_DATA + 4);
op = CMD_COMMAND;
op |= cmd->bits << CMD_REG_CLEN_SHIFT;
if (resp_bits)
op |= resp_bits << CMD_REG_RLEN_SHIFT;
return do_copro_command(master, op);
}
static int read_copro_response(struct fsi_master_acf *master, uint8_t size,
uint32_t *response, u8 *tag)
{
uint8_t rtag = ioread8(master->sram + STAT_RTAG) & 0xf;
uint8_t rcrc = ioread8(master->sram + STAT_RCRC) & 0xf;
uint32_t rdata = 0;
uint32_t crc;
uint8_t ack;
*tag = ack = rtag & 3;
/* we have a whole message now; check CRC */
crc = crc4(0, 1, 1);
crc = crc4(crc, rtag, 4);
if (ack == FSI_RESP_ACK && size) {
rdata = ioread32be(master->sram + RSP_DATA);
crc = crc4(crc, rdata, size);
if (response)
*response = rdata;
}
crc = crc4(crc, rcrc, 4);
trace_fsi_master_acf_copro_response(master, rtag, rcrc, rdata, crc == 0);
if (crc) {
/*
* Check if it's all 1's or all 0's, that probably means
* the host is off
*/
if ((rtag == 0xf && rcrc == 0xf) || (rtag == 0 && rcrc == 0))
return -ENODEV;
dev_dbg(master->dev, "Bad response CRC !\n");
return -EAGAIN;
}
return 0;
}
static int send_term(struct fsi_master_acf *master, uint8_t slave)
{
struct fsi_msg cmd;
uint8_t tag;
int rc;
build_term_command(&cmd, slave);
rc = send_request(master, &cmd, 0);
if (rc) {
dev_warn(master->dev, "Error %d sending term\n", rc);
return rc;
}
rc = read_copro_response(master, 0, NULL, &tag);
if (rc < 0) {
dev_err(master->dev,
"TERM failed; lost communication with slave\n");
return -EIO;
} else if (tag != FSI_RESP_ACK) {
dev_err(master->dev, "TERM failed; response %d\n", tag);
return -EIO;
}
return 0;
}
static void dump_ucode_trace(struct fsi_master_acf *master)
{
char trbuf[52];
char *p;
int i;
dev_dbg(master->dev,
"CMDSTAT:%08x RTAG=%02x RCRC=%02x RDATA=%02x #INT=%08x\n",
ioread32be(master->sram + CMD_STAT_REG),
ioread8(master->sram + STAT_RTAG),
ioread8(master->sram + STAT_RCRC),
ioread32be(master->sram + RSP_DATA),
ioread32be(master->sram + INT_CNT));
for (i = 0; i < 512; i++) {
uint8_t v;
if ((i % 16) == 0)
p = trbuf;
v = ioread8(master->sram + TRACEBUF + i);
p += sprintf(p, "%02x ", v);
if (((i % 16) == 15) || v == TR_END)
dev_dbg(master->dev, "%s\n", trbuf);
if (v == TR_END)
break;
}
}
static int handle_response(struct fsi_master_acf *master,
uint8_t slave, uint8_t size, void *data)
{
int busy_count = 0, rc;
int crc_err_retries = 0;
struct fsi_msg cmd;
uint32_t response;
uint8_t tag;
retry:
rc = read_copro_response(master, size, &response, &tag);
/* Handle retries on CRC errors */
if (rc == -EAGAIN) {
/* Too many retries ? */
if (crc_err_retries++ > FSI_CRC_ERR_RETRIES) {
/*
* Pass it up as a -EIO otherwise upper level will retry
* the whole command which isn't what we want here.
*/
rc = -EIO;
goto bail;
}
trace_fsi_master_acf_crc_rsp_error(master, crc_err_retries);
if (master->trace_enabled)
dump_ucode_trace(master);
rc = clock_zeros(master, FSI_MASTER_EPOLL_CLOCKS);
if (rc) {
dev_warn(master->dev,
"Error %d clocking zeros for E_POLL\n", rc);
return rc;
}
build_epoll_command(&cmd, slave);
rc = send_request(master, &cmd, size);
if (rc) {
dev_warn(master->dev, "Error %d sending E_POLL\n", rc);
return -EIO;
}
goto retry;
}
if (rc)
return rc;
switch (tag) {
case FSI_RESP_ACK:
if (size && data) {
if (size == 32)
*(__be32 *)data = cpu_to_be32(response);
else if (size == 16)
*(__be16 *)data = cpu_to_be16(response);
else
*(u8 *)data = response;
}
break;
case FSI_RESP_BUSY:
/*
* It's necessary to clock the slave before issuing
* d-poll; this is not indicated in the hardware protocol
* spec. Fewer than 20 clocks causes the slave to hang, 21 is ok.
*/
dev_dbg(master->dev, "Busy, retrying...\n");
if (master->trace_enabled)
dump_ucode_trace(master);
rc = clock_zeros(master, FSI_MASTER_DPOLL_CLOCKS);
if (rc) {
dev_warn(master->dev,
"Error %d clocking zeros for D_POLL\n", rc);
break;
}
if (busy_count++ < FSI_MASTER_MAX_BUSY) {
build_dpoll_command(&cmd, slave);
rc = send_request(master, &cmd, size);
if (rc) {
dev_warn(master->dev, "Error %d sending D_POLL\n", rc);
break;
}
goto retry;
}
dev_dbg(master->dev,
"ERR slave is stuck in busy state, issuing TERM\n");
send_term(master, slave);
rc = -EIO;
break;
case FSI_RESP_ERRA:
dev_dbg(master->dev, "ERRA received\n");
if (master->trace_enabled)
dump_ucode_trace(master);
rc = -EIO;
break;
case FSI_RESP_ERRC:
dev_dbg(master->dev, "ERRC received\n");
if (master->trace_enabled)
dump_ucode_trace(master);
rc = -EAGAIN;
break;
}
bail:
if (busy_count > 0) {
trace_fsi_master_acf_poll_response_busy(master, busy_count);
}
return rc;
}
static int fsi_master_acf_xfer(struct fsi_master_acf *master, uint8_t slave,
struct fsi_msg *cmd, size_t resp_len, void *resp)
{
int rc = -EAGAIN, retries = 0;
resp_len <<= 3;
while ((retries++) < FSI_CRC_ERR_RETRIES) {
rc = send_request(master, cmd, resp_len);
if (rc) {
if (rc != -ESHUTDOWN)
dev_warn(master->dev, "Error %d sending command\n", rc);
break;
}
rc = handle_response(master, slave, resp_len, resp);
if (rc != -EAGAIN)
break;
rc = -EIO;
dev_dbg(master->dev, "ECRC retry %d\n", retries);
/* Pace it a bit before retry */
msleep(1);
}
return rc;
}
static int fsi_master_acf_read(struct fsi_master *_master, int link,
uint8_t id, uint32_t addr, void *val,
size_t size)
{
struct fsi_master_acf *master = to_fsi_master_acf(_master);
struct fsi_msg cmd;
int rc;
if (link != 0)
return -ENODEV;
mutex_lock(&master->lock);
dev_dbg(master->dev, "read id %d addr %x size %zd\n", id, addr, size);
build_ar_command(master, &cmd, id, addr, size, NULL);
rc = fsi_master_acf_xfer(master, id, &cmd, size, val);
last_address_update(master, id, rc == 0, addr);
if (rc)
dev_dbg(master->dev, "read id %d addr 0x%08x err: %d\n",
id, addr, rc);
mutex_unlock(&master->lock);
return rc;
}
static int fsi_master_acf_write(struct fsi_master *_master, int link,
uint8_t id, uint32_t addr, const void *val,
size_t size)
{
struct fsi_master_acf *master = to_fsi_master_acf(_master);
struct fsi_msg cmd;
int rc;
if (link != 0)
return -ENODEV;
mutex_lock(&master->lock);
build_ar_command(master, &cmd, id, addr, size, val);
dev_dbg(master->dev, "write id %d addr %x size %zd raw_data: %08x\n",
id, addr, size, *(uint32_t *)val);
rc = fsi_master_acf_xfer(master, id, &cmd, 0, NULL);
last_address_update(master, id, rc == 0, addr);
if (rc)
dev_dbg(master->dev, "write id %d addr 0x%08x err: %d\n",
id, addr, rc);
mutex_unlock(&master->lock);
return rc;
}
static int fsi_master_acf_term(struct fsi_master *_master,
int link, uint8_t id)
{
struct fsi_master_acf *master = to_fsi_master_acf(_master);
struct fsi_msg cmd;
int rc;
if (link != 0)
return -ENODEV;
mutex_lock(&master->lock);
build_term_command(&cmd, id);
dev_dbg(master->dev, "term id %d\n", id);
rc = fsi_master_acf_xfer(master, id, &cmd, 0, NULL);
last_address_update(master, id, false, 0);
mutex_unlock(&master->lock);
return rc;
}
static int fsi_master_acf_break(struct fsi_master *_master, int link)
{
struct fsi_master_acf *master = to_fsi_master_acf(_master);
int rc;
if (link != 0)
return -ENODEV;
mutex_lock(&master->lock);
if (master->external_mode) {
mutex_unlock(&master->lock);
return -EBUSY;
}
dev_dbg(master->dev, "sending BREAK\n");
rc = do_copro_command(master, CMD_BREAK);
last_address_update(master, 0, false, 0);
mutex_unlock(&master->lock);
/* Wait for logic reset to take effect */
udelay(200);
return rc;
}
static void reset_cf(struct fsi_master_acf *master)
{
regmap_write(master->scu, SCU_COPRO_CTRL, SCU_COPRO_RESET);
usleep_range(20, 20);
regmap_write(master->scu, SCU_COPRO_CTRL, 0);
usleep_range(20, 20);
}
static void start_cf(struct fsi_master_acf *master)
{
regmap_write(master->scu, SCU_COPRO_CTRL, SCU_COPRO_CLK_EN);
}
static void setup_ast2500_cf_maps(struct fsi_master_acf *master)
{
/*
* Note about byteswap setting: the bus is wired backwards,
* so setting the byteswap bit actually makes the ColdFire
* work "normally" for a BE processor, ie, put the MSB in
* the lowest address byte.
*
* We thus need to set the bit for our main memory which
* contains our program code. We create two mappings for
* the register, one with each setting.
*
* Segments 2 and 3 have a "swapped" mapping (BE)
* and 6 and 7 have a non-swapped mapping (LE) which allows
* us to avoid byteswapping register accesses since the
* registers are all LE.
*/
/* Setup segment 0 to our memory region */
regmap_write(master->scu, SCU_2500_COPRO_SEG0, master->cf_mem_addr |
SCU_2500_COPRO_SEG_SWAP);
/* Segments 2 and 3 to sysregs with byteswap (for SRAM) */
regmap_write(master->scu, SCU_2500_COPRO_SEG2, SYSREG_BASE |
SCU_2500_COPRO_SEG_SWAP);
regmap_write(master->scu, SCU_2500_COPRO_SEG3, SYSREG_BASE | 0x100000 |
SCU_2500_COPRO_SEG_SWAP);
/* And segment 6 and 7 to sysregs no byteswap */
regmap_write(master->scu, SCU_2500_COPRO_SEG6, SYSREG_BASE);
regmap_write(master->scu, SCU_2500_COPRO_SEG7, SYSREG_BASE | 0x100000);
/* Memory cachable, regs and SRAM not cachable */
regmap_write(master->scu, SCU_2500_COPRO_CACHE_CTL,
SCU_2500_COPRO_SEG0_CACHE_EN | SCU_2500_COPRO_CACHE_EN);
}
static void setup_ast2400_cf_maps(struct fsi_master_acf *master)
{
/* Setup segment 0 to our memory region */
regmap_write(master->scu, SCU_2400_COPRO_SEG0, master->cf_mem_addr |
SCU_2400_COPRO_SEG_SWAP);
/* Segments 2 to sysregs with byteswap (for SRAM) */
regmap_write(master->scu, SCU_2400_COPRO_SEG2, SYSREG_BASE |
SCU_2400_COPRO_SEG_SWAP);
/* And segment 6 to sysregs no byteswap */
regmap_write(master->scu, SCU_2400_COPRO_SEG6, SYSREG_BASE);
/* Memory cachable, regs and SRAM not cachable */
regmap_write(master->scu, SCU_2400_COPRO_CACHE_CTL,
SCU_2400_COPRO_SEG0_CACHE_EN | SCU_2400_COPRO_CACHE_EN);
}
static void setup_common_fw_config(struct fsi_master_acf *master,
void __iomem *base)
{
iowrite16be(master->gpio_clk_vreg, base + HDR_CLOCK_GPIO_VADDR);
iowrite16be(master->gpio_clk_dreg, base + HDR_CLOCK_GPIO_DADDR);
iowrite16be(master->gpio_dat_vreg, base + HDR_DATA_GPIO_VADDR);
iowrite16be(master->gpio_dat_dreg, base + HDR_DATA_GPIO_DADDR);
iowrite16be(master->gpio_tra_vreg, base + HDR_TRANS_GPIO_VADDR);
iowrite16be(master->gpio_tra_dreg, base + HDR_TRANS_GPIO_DADDR);
iowrite8(master->gpio_clk_bit, base + HDR_CLOCK_GPIO_BIT);
iowrite8(master->gpio_dat_bit, base + HDR_DATA_GPIO_BIT);
iowrite8(master->gpio_tra_bit, base + HDR_TRANS_GPIO_BIT);
}
static void setup_ast2500_fw_config(struct fsi_master_acf *master)
{
void __iomem *base = master->cf_mem + HDR_OFFSET;
setup_common_fw_config(master, base);
iowrite32be(FW_CONTROL_USE_STOP, base + HDR_FW_CONTROL);
}
static void setup_ast2400_fw_config(struct fsi_master_acf *master)
{
void __iomem *base = master->cf_mem + HDR_OFFSET;
setup_common_fw_config(master, base);
iowrite32be(FW_CONTROL_CONT_CLOCK|FW_CONTROL_DUMMY_RD, base + HDR_FW_CONTROL);
}
static int setup_gpios_for_copro(struct fsi_master_acf *master)
{
int rc;
/* These aren't under ColdFire control, just set them up appropriately */
gpiod_direction_output(master->gpio_mux, 1);
gpiod_direction_output(master->gpio_enable, 1);
/* Those are under ColdFire control, let it configure them */
rc = aspeed_gpio_copro_grab_gpio(master->gpio_clk, &master->gpio_clk_vreg,
&master->gpio_clk_dreg, &master->gpio_clk_bit);
if (rc) {
dev_err(master->dev, "failed to assign clock gpio to coprocessor\n");
return rc;
}
rc = aspeed_gpio_copro_grab_gpio(master->gpio_data, &master->gpio_dat_vreg,
&master->gpio_dat_dreg, &master->gpio_dat_bit);
if (rc) {
dev_err(master->dev, "failed to assign data gpio to coprocessor\n");
aspeed_gpio_copro_release_gpio(master->gpio_clk);
return rc;
}
rc = aspeed_gpio_copro_grab_gpio(master->gpio_trans, &master->gpio_tra_vreg,
&master->gpio_tra_dreg, &master->gpio_tra_bit);
if (rc) {
dev_err(master->dev, "failed to assign trans gpio to coprocessor\n");
aspeed_gpio_copro_release_gpio(master->gpio_clk);
aspeed_gpio_copro_release_gpio(master->gpio_data);
return rc;
}
return 0;
}
static void release_copro_gpios(struct fsi_master_acf *master)
{
aspeed_gpio_copro_release_gpio(master->gpio_clk);
aspeed_gpio_copro_release_gpio(master->gpio_data);
aspeed_gpio_copro_release_gpio(master->gpio_trans);
}
static int load_copro_firmware(struct fsi_master_acf *master)
{
const struct firmware *fw;
uint16_t sig = 0, wanted_sig;
const u8 *data;
size_t size = 0;
int rc;
/* Get the binary */
rc = request_firmware(&fw, FW_FILE_NAME, master->dev);
if (rc) {
dev_err(master->dev, "Error %d loading firmware '%s'!\n",
rc, FW_FILE_NAME);
return rc;
}
/* Which image do we want ? (shared vs. split clock/data GPIOs) */
if (master->gpio_clk_vreg == master->gpio_dat_vreg)
wanted_sig = SYS_SIG_SHARED;
else
wanted_sig = SYS_SIG_SPLIT;
dev_dbg(master->dev, "Looking for image sig %04x\n", wanted_sig);
/* Try to find it */
for (data = fw->data; data < (fw->data + fw->size);) {
sig = be16_to_cpup((__be16 *)(data + HDR_OFFSET + HDR_SYS_SIG));
size = be32_to_cpup((__be32 *)(data + HDR_OFFSET + HDR_FW_SIZE));
if (sig == wanted_sig)
break;
data += size;
}
if (sig != wanted_sig) {
dev_err(master->dev, "Failed to locate image sig %04x in FW blob\n",
wanted_sig);
rc = -ENODEV;
goto release_fw;
}
if (size > master->cf_mem_size) {
dev_err(master->dev, "FW size (%zd) bigger than memory reserve (%zd)\n",
size, master->cf_mem_size);
rc = -ENOMEM;
} else {
memcpy_toio(master->cf_mem, data, size);
}
release_fw:
release_firmware(fw);
return rc;
}
static int check_firmware_image(struct fsi_master_acf *master)
{
uint32_t fw_vers, fw_api, fw_options;
fw_vers = ioread16be(master->cf_mem + HDR_OFFSET + HDR_FW_VERS);
fw_api = ioread16be(master->cf_mem + HDR_OFFSET + HDR_API_VERS);
fw_options = ioread32be(master->cf_mem + HDR_OFFSET + HDR_FW_OPTIONS);
master->trace_enabled = !!(fw_options & FW_OPTION_TRACE_EN);
/* Check version and signature */
dev_info(master->dev, "ColdFire initialized, firmware v%d API v%d.%d (trace %s)\n",
fw_vers, fw_api >> 8, fw_api & 0xff,
master->trace_enabled ? "enabled" : "disabled");
if ((fw_api >> 8) != API_VERSION_MAJ) {
dev_err(master->dev, "Unsupported coprocessor API version !\n");
return -ENODEV;
}
return 0;
}
static int copro_enable_sw_irq(struct fsi_master_acf *master)
{
int timeout;
uint32_t val;
/*
* Enable coprocessor interrupt input. I've had problems getting the
* value to stick, so try in a loop
*/
for (timeout = 0; timeout < 10; timeout++) {
iowrite32(0x2, master->cvic + CVIC_EN_REG);
val = ioread32(master->cvic + CVIC_EN_REG);
if (val & 2)
break;
msleep(1);
}
if (!(val & 2)) {
dev_err(master->dev, "Failed to enable coprocessor interrupt !\n");
return -ENODEV;
}
return 0;
}
static int fsi_master_acf_setup(struct fsi_master_acf *master)
{
int timeout, rc;
uint32_t val;
/* Make sure the ColdFire is stopped */
reset_cf(master);
/*
* Clear SRAM. This needs to happen before we setup the GPIOs
* as we might start trying to arbitrate as soon as that happens.
*/
memset_io(master->sram, 0, SRAM_SIZE);
/* Configure GPIOs */
rc = setup_gpios_for_copro(master);
if (rc)
return rc;
/* Load the firmware into the reserved memory */
rc = load_copro_firmware(master);
if (rc)
return rc;
/* Read signature and check versions */
rc = check_firmware_image(master);
if (rc)
return rc;
/* Setup coldfire memory map */
if (master->is_ast2500) {
setup_ast2500_cf_maps(master);
setup_ast2500_fw_config(master);
} else {
setup_ast2400_cf_maps(master);
setup_ast2400_fw_config(master);
}
/* Start the ColdFire */
start_cf(master);
/* Wait for status register to indicate command completion
* which signals the initialization is complete
*/
for (timeout = 0; timeout < 10; timeout++) {
val = ioread8(master->sram + CF_STARTED);
if (val)
break;
msleep(1);
}
if (!val) {
dev_err(master->dev, "Coprocessor startup timeout !\n");
rc = -ENODEV;
goto err;
}
/* Configure echo & send delay */
iowrite8(master->t_send_delay, master->sram + SEND_DLY_REG);
iowrite8(master->t_echo_delay, master->sram + ECHO_DLY_REG);
/* Enable SW interrupt to copro if any */
if (master->cvic) {
rc = copro_enable_sw_irq(master);
if (rc)
goto err;
}
return 0;
err:
/* An error occurred, don't leave the coprocessor running */
reset_cf(master);
/* Release the GPIOs */
release_copro_gpios(master);
return rc;
}
static void fsi_master_acf_terminate(struct fsi_master_acf *master)
{
unsigned long flags;
/*
* A GPIO arbitration request could come in while this is
* happening. To avoid problems, we disable interrupts so it
* cannot preempt us on this CPU
*/
local_irq_save(flags);
/* Stop the coprocessor */
reset_cf(master);
/* We mark the copro not-started */
iowrite32(0, master->sram + CF_STARTED);
/* We mark the ARB register as having given up arbitration to
* deal with a potential race with the arbitration request
*/
iowrite8(ARB_ARM_ACK, master->sram + ARB_REG);
local_irq_restore(flags);
/* Return the GPIOs to the ARM */
release_copro_gpios(master);
}
static void fsi_master_acf_setup_external(struct fsi_master_acf *master)
{
/* Setup GPIOs for external FSI master (FSP box) */
gpiod_direction_output(master->gpio_mux, 0);
gpiod_direction_output(master->gpio_trans, 0);
gpiod_direction_output(master->gpio_enable, 1);
gpiod_direction_input(master->gpio_clk);
gpiod_direction_input(master->gpio_data);
}
static int fsi_master_acf_link_enable(struct fsi_master *_master, int link,
bool enable)
{
struct fsi_master_acf *master = to_fsi_master_acf(_master);
int rc = -EBUSY;
if (link != 0)
return -ENODEV;
mutex_lock(&master->lock);
if (!master->external_mode) {
gpiod_set_value(master->gpio_enable, enable ? 1 : 0);
rc = 0;
}
mutex_unlock(&master->lock);
return rc;
}
static int fsi_master_acf_link_config(struct fsi_master *_master, int link,
u8 t_send_delay, u8 t_echo_delay)
{
struct fsi_master_acf *master = to_fsi_master_acf(_master);
if (link != 0)
return -ENODEV;
mutex_lock(&master->lock);
master->t_send_delay = t_send_delay;
master->t_echo_delay = t_echo_delay;
dev_dbg(master->dev, "Changing delays: send=%d echo=%d\n",
t_send_delay, t_echo_delay);
iowrite8(master->t_send_delay, master->sram + SEND_DLY_REG);
iowrite8(master->t_echo_delay, master->sram + ECHO_DLY_REG);
mutex_unlock(&master->lock);
return 0;
}
static ssize_t external_mode_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct fsi_master_acf *master = dev_get_drvdata(dev);
return snprintf(buf, PAGE_SIZE - 1, "%u\n",
master->external_mode ? 1 : 0);
}
static ssize_t external_mode_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
struct fsi_master_acf *master = dev_get_drvdata(dev);
unsigned long val;
bool external_mode;
int err;
err = kstrtoul(buf, 0, &val);
if (err)
return err;
external_mode = !!val;
mutex_lock(&master->lock);
if (external_mode == master->external_mode) {
mutex_unlock(&master->lock);
return count;
}
master->external_mode = external_mode;
if (master->external_mode) {
fsi_master_acf_terminate(master);
fsi_master_acf_setup_external(master);
} else
fsi_master_acf_setup(master);
mutex_unlock(&master->lock);
fsi_master_rescan(&master->master);
return count;
}
static DEVICE_ATTR(external_mode, 0664,
external_mode_show, external_mode_store);
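/*
* As an illustration (the exact sysfs path is an assumption and depends on the
* device tree): writing a non-zero value hands the bus to an external master
* (coprocessor stopped, clock/data set as inputs) and writing 0 restarts the
* coprocessor; both trigger a rescan, e.g.
*   echo 1 > /sys/bus/platform/devices/<cf-fsi-master>/external_mode
*/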
static int fsi_master_acf_gpio_request(void *data)
{
struct fsi_master_acf *master = data;
int timeout;
u8 val;
/* Note: This doesn't require holding our mutex */
/* Write request */
iowrite8(ARB_ARM_REQ, master->sram + ARB_REG);
/*
* There is a race (which does happen at boot time) when we get an
* arbitration request as we are either about to or just starting
* the coprocessor.
*
* To handle it, we first check if we are running. If not yet we
* check whether the copro is started in the SCU.
*
* If it's not started, we can basically just assume we have arbitration
* and return. Otherwise, we wait normally expecting for the arbitration
* to eventually complete.
*/
if (ioread32(master->sram + CF_STARTED) == 0) {
unsigned int reg = 0;
regmap_read(master->scu, SCU_COPRO_CTRL, &reg);
if (!(reg & SCU_COPRO_CLK_EN))
return 0;
}
/* Ring doorbell if any */
if (master->cvic)
iowrite32(0x2, master->cvic + CVIC_TRIG_REG);
for (timeout = 0; timeout < 10000; timeout++) {
val = ioread8(master->sram + ARB_REG);
if (val != ARB_ARM_REQ)
break;
udelay(1);
}
/* If it failed, override anyway */
if (val != ARB_ARM_ACK)
dev_warn(master->dev, "GPIO request arbitration timeout\n");
return 0;
}
static int fsi_master_acf_gpio_release(void *data)
{
struct fsi_master_acf *master = data;
/* Write release */
iowrite8(0, master->sram + ARB_REG);
/* Ring doorbell if any */
if (master->cvic)
iowrite32(0x2, master->cvic + CVIC_TRIG_REG);
return 0;
}
static void fsi_master_acf_release(struct device *dev)
{
struct fsi_master_acf *master = to_fsi_master_acf(to_fsi_master(dev));
/* Cleanup, stop coprocessor */
mutex_lock(&master->lock);
fsi_master_acf_terminate(master);
aspeed_gpio_copro_set_ops(NULL, NULL);
mutex_unlock(&master->lock);
/* Free resources */
gen_pool_free(master->sram_pool, (unsigned long)master->sram, SRAM_SIZE);
of_node_put(dev_of_node(master->dev));
kfree(master);
}
static const struct aspeed_gpio_copro_ops fsi_master_acf_gpio_ops = {
.request_access = fsi_master_acf_gpio_request,
.release_access = fsi_master_acf_gpio_release,
};
static int fsi_master_acf_probe(struct platform_device *pdev)
{
struct device_node *np, *mnode = dev_of_node(&pdev->dev);
struct genpool_data_fixed gpdf;
struct fsi_master_acf *master;
struct gpio_desc *gpio;
struct resource res;
uint32_t cf_mem_align;
int rc;
master = kzalloc(sizeof(*master), GFP_KERNEL);
if (!master)
return -ENOMEM;
master->dev = &pdev->dev;
master->master.dev.parent = master->dev;
master->last_addr = LAST_ADDR_INVALID;
/* AST2400 vs. AST2500 */
master->is_ast2500 = of_device_is_compatible(mnode, "aspeed,ast2500-cf-fsi-master");
/* Grab the SCU, we'll need to access it to configure the coprocessor */
if (master->is_ast2500)
master->scu = syscon_regmap_lookup_by_compatible("aspeed,ast2500-scu");
else
master->scu = syscon_regmap_lookup_by_compatible("aspeed,ast2400-scu");
if (IS_ERR(master->scu)) {
dev_err(&pdev->dev, "failed to find SCU regmap\n");
rc = PTR_ERR(master->scu);
goto err_free;
}
/* Grab all the GPIOs we need */
gpio = devm_gpiod_get(&pdev->dev, "clock", 0);
if (IS_ERR(gpio)) {
dev_err(&pdev->dev, "failed to get clock gpio\n");
rc = PTR_ERR(gpio);
goto err_free;
}
master->gpio_clk = gpio;
gpio = devm_gpiod_get(&pdev->dev, "data", 0);
if (IS_ERR(gpio)) {
dev_err(&pdev->dev, "failed to get data gpio\n");
rc = PTR_ERR(gpio);
goto err_free;
}
master->gpio_data = gpio;
/* Optional GPIOs */
gpio = devm_gpiod_get_optional(&pdev->dev, "trans", 0);
if (IS_ERR(gpio)) {
dev_err(&pdev->dev, "failed to get trans gpio\n");
rc = PTR_ERR(gpio);
goto err_free;
}
master->gpio_trans = gpio;
gpio = devm_gpiod_get_optional(&pdev->dev, "enable", 0);
if (IS_ERR(gpio)) {
dev_err(&pdev->dev, "failed to get enable gpio\n");
rc = PTR_ERR(gpio);
goto err_free;
}
master->gpio_enable = gpio;
gpio = devm_gpiod_get_optional(&pdev->dev, "mux", 0);
if (IS_ERR(gpio)) {
dev_err(&pdev->dev, "failed to get mux gpio\n");
rc = PTR_ERR(gpio);
goto err_free;
}
master->gpio_mux = gpio;
/* Grab the reserved memory region (use DMA API instead ?) */
np = of_parse_phandle(mnode, "memory-region", 0);
if (!np) {
dev_err(&pdev->dev, "Didn't find reserved memory\n");
rc = -EINVAL;
goto err_free;
}
rc = of_address_to_resource(np, 0, &res);
of_node_put(np);
if (rc) {
dev_err(&pdev->dev, "Couldn't address to resource for reserved memory\n");
rc = -ENOMEM;
goto err_free;
}
master->cf_mem_size = resource_size(&res);
master->cf_mem_addr = (uint32_t)res.start;
cf_mem_align = master->is_ast2500 ? 0x00100000 : 0x00200000;
if (master->cf_mem_addr & (cf_mem_align - 1)) {
dev_err(&pdev->dev, "Reserved memory has insufficient alignment\n");
rc = -ENOMEM;
goto err_free;
}
master->cf_mem = devm_ioremap_resource(&pdev->dev, &res);
if (IS_ERR(master->cf_mem)) {
rc = PTR_ERR(master->cf_mem);
goto err_free;
}
dev_dbg(&pdev->dev, "DRAM allocation @%x\n", master->cf_mem_addr);
/* AST2500 has a SW interrupt to the coprocessor */
if (master->is_ast2500) {
/* Grab the CVIC (ColdFire interrupts controller) */
np = of_parse_phandle(mnode, "aspeed,cvic", 0);
if (!np) {
dev_err(&pdev->dev, "Didn't find CVIC\n");
rc = -EINVAL;
goto err_free;
}
master->cvic = devm_of_iomap(&pdev->dev, np, 0, NULL);
if (IS_ERR(master->cvic)) {
of_node_put(np);
rc = PTR_ERR(master->cvic);
dev_err(&pdev->dev, "Error %d mapping CVIC\n", rc);
goto err_free;
}
rc = of_property_read_u32(np, "copro-sw-interrupts",
&master->cvic_sw_irq);
of_node_put(np);
if (rc) {
dev_err(&pdev->dev, "Can't find coprocessor SW interrupt\n");
goto err_free;
}
}
/* Grab the SRAM */
master->sram_pool = of_gen_pool_get(dev_of_node(&pdev->dev), "aspeed,sram", 0);
if (!master->sram_pool) {
rc = -ENODEV;
dev_err(&pdev->dev, "Can't find sram pool\n");
goto err_free;
}
/* Current microcode only deals with fixed location in SRAM */
gpdf.offset = 0;
master->sram = (void __iomem *)gen_pool_alloc_algo(master->sram_pool, SRAM_SIZE,
gen_pool_fixed_alloc, &gpdf);
if (!master->sram) {
rc = -ENOMEM;
dev_err(&pdev->dev, "Failed to allocate sram from pool\n");
goto err_free;
}
dev_dbg(&pdev->dev, "SRAM allocation @%lx\n",
(unsigned long)gen_pool_virt_to_phys(master->sram_pool,
(unsigned long)master->sram));
/*
* Hookup with the GPIO driver for arbitration of GPIO banks
* ownership.
*/
aspeed_gpio_copro_set_ops(&fsi_master_acf_gpio_ops, master);
/* Default FSI command delays */
master->t_send_delay = FSI_SEND_DELAY_CLOCKS;
master->t_echo_delay = FSI_ECHO_DELAY_CLOCKS;
master->master.n_links = 1;
if (master->is_ast2500)
master->master.flags = FSI_MASTER_FLAG_SWCLOCK;
master->master.read = fsi_master_acf_read;
master->master.write = fsi_master_acf_write;
master->master.term = fsi_master_acf_term;
master->master.send_break = fsi_master_acf_break;
master->master.link_enable = fsi_master_acf_link_enable;
master->master.link_config = fsi_master_acf_link_config;
master->master.dev.of_node = of_node_get(dev_of_node(master->dev));
master->master.dev.release = fsi_master_acf_release;
platform_set_drvdata(pdev, master);
mutex_init(&master->lock);
mutex_lock(&master->lock);
rc = fsi_master_acf_setup(master);
mutex_unlock(&master->lock);
if (rc)
goto release_of_dev;
rc = device_create_file(&pdev->dev, &dev_attr_external_mode);
if (rc)
goto stop_copro;
rc = fsi_master_register(&master->master);
if (!rc)
return 0;
device_remove_file(master->dev, &dev_attr_external_mode);
put_device(&master->master.dev);
return rc;
stop_copro:
fsi_master_acf_terminate(master);
release_of_dev:
aspeed_gpio_copro_set_ops(NULL, NULL);
gen_pool_free(master->sram_pool, (unsigned long)master->sram, SRAM_SIZE);
of_node_put(dev_of_node(master->dev));
err_free:
kfree(master);
return rc;
}
static int fsi_master_acf_remove(struct platform_device *pdev)
{
struct fsi_master_acf *master = platform_get_drvdata(pdev);
device_remove_file(master->dev, &dev_attr_external_mode);
fsi_master_unregister(&master->master);
return 0;
}
static const struct of_device_id fsi_master_acf_match[] = {
{ .compatible = "aspeed,ast2400-cf-fsi-master" },
{ .compatible = "aspeed,ast2500-cf-fsi-master" },
{ },
};
MODULE_DEVICE_TABLE(of, fsi_master_acf_match);
static struct platform_driver fsi_master_acf = {
.driver = {
.name = "fsi-master-acf",
.of_match_table = fsi_master_acf_match,
},
.probe = fsi_master_acf_probe,
.remove = fsi_master_acf_remove,
};
module_platform_driver(fsi_master_acf);
MODULE_LICENSE("GPL");
MODULE_FIRMWARE(FW_FILE_NAME);
| linux-master | drivers/fsi/fsi-master-ast-cf.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* common LSM auditing functions
*
* Based on code written for SELinux by :
* Stephen Smalley, <[email protected]>
* James Morris <[email protected]>
* Author : Etienne Basset, <[email protected]>
*/
#include <linux/types.h>
#include <linux/stddef.h>
#include <linux/kernel.h>
#include <linux/gfp.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <net/sock.h>
#include <linux/un.h>
#include <net/af_unix.h>
#include <linux/audit.h>
#include <linux/ipv6.h>
#include <linux/ip.h>
#include <net/ip.h>
#include <net/ipv6.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/dccp.h>
#include <linux/sctp.h>
#include <linux/lsm_audit.h>
#include <linux/security.h>
/**
* ipv4_skb_to_auditdata : fill auditdata from skb
* @skb : the skb
* @ad : the audit data to fill
* @proto : the layer 4 protocol
*
* return 0 on success
*/
int ipv4_skb_to_auditdata(struct sk_buff *skb,
struct common_audit_data *ad, u8 *proto)
{
int ret = 0;
struct iphdr *ih;
ih = ip_hdr(skb);
ad->u.net->v4info.saddr = ih->saddr;
ad->u.net->v4info.daddr = ih->daddr;
if (proto)
*proto = ih->protocol;
/* non-initial fragment */
if (ntohs(ih->frag_off) & IP_OFFSET)
return 0;
switch (ih->protocol) {
case IPPROTO_TCP: {
struct tcphdr *th = tcp_hdr(skb);
ad->u.net->sport = th->source;
ad->u.net->dport = th->dest;
break;
}
case IPPROTO_UDP: {
struct udphdr *uh = udp_hdr(skb);
ad->u.net->sport = uh->source;
ad->u.net->dport = uh->dest;
break;
}
case IPPROTO_DCCP: {
struct dccp_hdr *dh = dccp_hdr(skb);
ad->u.net->sport = dh->dccph_sport;
ad->u.net->dport = dh->dccph_dport;
break;
}
case IPPROTO_SCTP: {
struct sctphdr *sh = sctp_hdr(skb);
ad->u.net->sport = sh->source;
ad->u.net->dport = sh->dest;
break;
}
default:
ret = -EINVAL;
}
return ret;
}
#if IS_ENABLED(CONFIG_IPV6)
/**
* ipv6_skb_to_auditdata : fill auditdata from skb
* @skb : the skb
* @ad : the audit data to fill
* @proto : the layer 4 protocol
*
* return 0 on success
*/
int ipv6_skb_to_auditdata(struct sk_buff *skb,
struct common_audit_data *ad, u8 *proto)
{
int offset, ret = 0;
struct ipv6hdr *ip6;
u8 nexthdr;
__be16 frag_off;
ip6 = ipv6_hdr(skb);
ad->u.net->v6info.saddr = ip6->saddr;
ad->u.net->v6info.daddr = ip6->daddr;
/* IPv6 can have several extension headers before the transport header;
* skip them */
offset = skb_network_offset(skb);
offset += sizeof(*ip6);
nexthdr = ip6->nexthdr;
offset = ipv6_skip_exthdr(skb, offset, &nexthdr, &frag_off);
if (offset < 0)
return 0;
if (proto)
*proto = nexthdr;
switch (nexthdr) {
case IPPROTO_TCP: {
struct tcphdr _tcph, *th;
th = skb_header_pointer(skb, offset, sizeof(_tcph), &_tcph);
if (th == NULL)
break;
ad->u.net->sport = th->source;
ad->u.net->dport = th->dest;
break;
}
case IPPROTO_UDP: {
struct udphdr _udph, *uh;
uh = skb_header_pointer(skb, offset, sizeof(_udph), &_udph);
if (uh == NULL)
break;
ad->u.net->sport = uh->source;
ad->u.net->dport = uh->dest;
break;
}
case IPPROTO_DCCP: {
struct dccp_hdr _dccph, *dh;
dh = skb_header_pointer(skb, offset, sizeof(_dccph), &_dccph);
if (dh == NULL)
break;
ad->u.net->sport = dh->dccph_sport;
ad->u.net->dport = dh->dccph_dport;
break;
}
case IPPROTO_SCTP: {
struct sctphdr _sctph, *sh;
sh = skb_header_pointer(skb, offset, sizeof(_sctph), &_sctph);
if (sh == NULL)
break;
ad->u.net->sport = sh->source;
ad->u.net->dport = sh->dest;
break;
}
default:
ret = -EINVAL;
}
return ret;
}
#endif
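/*
 * Illustrative sketch (not part of this file): how a security module might
 * fill a struct common_audit_data from a packet using the helpers above,
 * assuming struct lsm_network_audit from <linux/lsm_audit.h>. The skb
 * variable and the error handling are placeholders for this example only.
 *
 *	struct common_audit_data ad;
 *	struct lsm_network_audit net = {0,};
 *	u8 proto = 0;
 *
 *	ad.type = LSM_AUDIT_DATA_NET;
 *	ad.u.net = &net;
 *	if (ipv4_skb_to_auditdata(skb, &ad, &proto))
 *		return;
 */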
static inline void print_ipv6_addr(struct audit_buffer *ab,
const struct in6_addr *addr, __be16 port,
char *name1, char *name2)
{
if (!ipv6_addr_any(addr))
audit_log_format(ab, " %s=%pI6c", name1, addr);
if (port)
audit_log_format(ab, " %s=%d", name2, ntohs(port));
}
static inline void print_ipv4_addr(struct audit_buffer *ab, __be32 addr,
__be16 port, char *name1, char *name2)
{
if (addr)
audit_log_format(ab, " %s=%pI4", name1, &addr);
if (port)
audit_log_format(ab, " %s=%d", name2, ntohs(port));
}
/**
* dump_common_audit_data - helper to dump common audit data
* @ab : the audit buffer
* @a : common audit data
*
*/
static void dump_common_audit_data(struct audit_buffer *ab,
struct common_audit_data *a)
{
char comm[sizeof(current->comm)];
/*
* To keep stack sizes in check, force programmers to notice if they
* start making this union too large! See struct lsm_network_audit
* as an example of how to deal with large data.
*/
BUILD_BUG_ON(sizeof(a->u) > sizeof(void *)*2);
audit_log_format(ab, " pid=%d comm=", task_tgid_nr(current));
audit_log_untrustedstring(ab, memcpy(comm, current->comm, sizeof(comm)));
switch (a->type) {
case LSM_AUDIT_DATA_NONE:
return;
case LSM_AUDIT_DATA_IPC:
audit_log_format(ab, " ipc_key=%d ", a->u.ipc_id);
break;
case LSM_AUDIT_DATA_CAP:
audit_log_format(ab, " capability=%d ", a->u.cap);
break;
case LSM_AUDIT_DATA_PATH: {
struct inode *inode;
audit_log_d_path(ab, " path=", &a->u.path);
inode = d_backing_inode(a->u.path.dentry);
if (inode) {
audit_log_format(ab, " dev=");
audit_log_untrustedstring(ab, inode->i_sb->s_id);
audit_log_format(ab, " ino=%lu", inode->i_ino);
}
break;
}
case LSM_AUDIT_DATA_FILE: {
struct inode *inode;
audit_log_d_path(ab, " path=", &a->u.file->f_path);
inode = file_inode(a->u.file);
if (inode) {
audit_log_format(ab, " dev=");
audit_log_untrustedstring(ab, inode->i_sb->s_id);
audit_log_format(ab, " ino=%lu", inode->i_ino);
}
break;
}
case LSM_AUDIT_DATA_IOCTL_OP: {
struct inode *inode;
audit_log_d_path(ab, " path=", &a->u.op->path);
inode = a->u.op->path.dentry->d_inode;
if (inode) {
audit_log_format(ab, " dev=");
audit_log_untrustedstring(ab, inode->i_sb->s_id);
audit_log_format(ab, " ino=%lu", inode->i_ino);
}
audit_log_format(ab, " ioctlcmd=0x%hx", a->u.op->cmd);
break;
}
case LSM_AUDIT_DATA_DENTRY: {
struct inode *inode;
audit_log_format(ab, " name=");
spin_lock(&a->u.dentry->d_lock);
audit_log_untrustedstring(ab, a->u.dentry->d_name.name);
spin_unlock(&a->u.dentry->d_lock);
inode = d_backing_inode(a->u.dentry);
if (inode) {
audit_log_format(ab, " dev=");
audit_log_untrustedstring(ab, inode->i_sb->s_id);
audit_log_format(ab, " ino=%lu", inode->i_ino);
}
break;
}
case LSM_AUDIT_DATA_INODE: {
struct dentry *dentry;
struct inode *inode;
rcu_read_lock();
inode = a->u.inode;
dentry = d_find_alias_rcu(inode);
if (dentry) {
audit_log_format(ab, " name=");
spin_lock(&dentry->d_lock);
audit_log_untrustedstring(ab, dentry->d_name.name);
spin_unlock(&dentry->d_lock);
}
audit_log_format(ab, " dev=");
audit_log_untrustedstring(ab, inode->i_sb->s_id);
audit_log_format(ab, " ino=%lu", inode->i_ino);
rcu_read_unlock();
break;
}
case LSM_AUDIT_DATA_TASK: {
struct task_struct *tsk = a->u.tsk;
if (tsk) {
pid_t pid = task_tgid_nr(tsk);
if (pid) {
char comm[sizeof(tsk->comm)];
audit_log_format(ab, " opid=%d ocomm=", pid);
audit_log_untrustedstring(ab,
memcpy(comm, tsk->comm, sizeof(comm)));
}
}
break;
}
case LSM_AUDIT_DATA_NET:
if (a->u.net->sk) {
const struct sock *sk = a->u.net->sk;
const struct unix_sock *u;
struct unix_address *addr;
int len = 0;
char *p = NULL;
switch (sk->sk_family) {
case AF_INET: {
const struct inet_sock *inet = inet_sk(sk);
print_ipv4_addr(ab, inet->inet_rcv_saddr,
inet->inet_sport,
"laddr", "lport");
print_ipv4_addr(ab, inet->inet_daddr,
inet->inet_dport,
"faddr", "fport");
break;
}
#if IS_ENABLED(CONFIG_IPV6)
case AF_INET6: {
const struct inet_sock *inet = inet_sk(sk);
print_ipv6_addr(ab, &sk->sk_v6_rcv_saddr,
inet->inet_sport,
"laddr", "lport");
print_ipv6_addr(ab, &sk->sk_v6_daddr,
inet->inet_dport,
"faddr", "fport");
break;
}
#endif
case AF_UNIX:
u = unix_sk(sk);
addr = smp_load_acquire(&u->addr);
if (!addr)
break;
if (u->path.dentry) {
audit_log_d_path(ab, " path=", &u->path);
break;
}
len = addr->len-sizeof(short);
p = &addr->name->sun_path[0];
audit_log_format(ab, " path=");
if (*p)
audit_log_untrustedstring(ab, p);
else
audit_log_n_hex(ab, p, len);
break;
}
}
switch (a->u.net->family) {
case AF_INET:
print_ipv4_addr(ab, a->u.net->v4info.saddr,
a->u.net->sport,
"saddr", "src");
print_ipv4_addr(ab, a->u.net->v4info.daddr,
a->u.net->dport,
"daddr", "dest");
break;
case AF_INET6:
print_ipv6_addr(ab, &a->u.net->v6info.saddr,
a->u.net->sport,
"saddr", "src");
print_ipv6_addr(ab, &a->u.net->v6info.daddr,
a->u.net->dport,
"daddr", "dest");
break;
}
if (a->u.net->netif > 0) {
struct net_device *dev;
/* NOTE: we always use init's namespace */
dev = dev_get_by_index(&init_net, a->u.net->netif);
if (dev) {
audit_log_format(ab, " netif=%s", dev->name);
dev_put(dev);
}
}
break;
#ifdef CONFIG_KEYS
case LSM_AUDIT_DATA_KEY:
audit_log_format(ab, " key_serial=%u", a->u.key_struct.key);
if (a->u.key_struct.key_desc) {
audit_log_format(ab, " key_desc=");
audit_log_untrustedstring(ab, a->u.key_struct.key_desc);
}
break;
#endif
case LSM_AUDIT_DATA_KMOD:
audit_log_format(ab, " kmod=");
audit_log_untrustedstring(ab, a->u.kmod_name);
break;
case LSM_AUDIT_DATA_IBPKEY: {
struct in6_addr sbn_pfx;
memset(&sbn_pfx.s6_addr, 0,
sizeof(sbn_pfx.s6_addr));
memcpy(&sbn_pfx.s6_addr, &a->u.ibpkey->subnet_prefix,
sizeof(a->u.ibpkey->subnet_prefix));
audit_log_format(ab, " pkey=0x%x subnet_prefix=%pI6c",
a->u.ibpkey->pkey, &sbn_pfx);
break;
}
case LSM_AUDIT_DATA_IBENDPORT:
audit_log_format(ab, " device=%s port_num=%u",
a->u.ibendport->dev_name,
a->u.ibendport->port);
break;
case LSM_AUDIT_DATA_LOCKDOWN:
audit_log_format(ab, " lockdown_reason=\"%s\"",
lockdown_reasons[a->u.reason]);
break;
case LSM_AUDIT_DATA_ANONINODE:
audit_log_format(ab, " anonclass=%s", a->u.anonclass);
break;
} /* switch (a->type) */
}
/**
* common_lsm_audit - generic LSM auditing function
* @a: auxiliary audit data
* @pre_audit: lsm-specific pre-audit callback
* @post_audit: lsm-specific post-audit callback
*
* setup the audit buffer for common security information
* uses callback to print LSM specific information
*/
void common_lsm_audit(struct common_audit_data *a,
void (*pre_audit)(struct audit_buffer *, void *),
void (*post_audit)(struct audit_buffer *, void *))
{
struct audit_buffer *ab;
if (a == NULL)
return;
/* we use GFP_ATOMIC so we won't sleep */
ab = audit_log_start(audit_context(), GFP_ATOMIC | __GFP_NOWARN,
AUDIT_AVC);
if (ab == NULL)
return;
if (pre_audit)
pre_audit(ab, a);
dump_common_audit_data(ab, a);
if (post_audit)
post_audit(ab, a);
audit_log_end(ab);
}
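/*
 * Illustrative sketch (not part of this file): a minimal caller of
 * common_lsm_audit(). The "mylsm" names and the callback bodies are
 * placeholders; real LSMs log their own policy-specific fields.
 *
 *	static void mylsm_pre_audit(struct audit_buffer *ab, void *a)
 *	{
 *		audit_log_format(ab, "lsm=mylsm ");
 *	}
 *
 *	static void mylsm_post_audit(struct audit_buffer *ab, void *a)
 *	{
 *		audit_log_format(ab, " result=denied");
 *	}
 *
 *	struct common_audit_data ad;
 *	ad.type = LSM_AUDIT_DATA_KMOD;
 *	ad.u.kmod_name = "example_module";
 *	common_lsm_audit(&ad, mylsm_pre_audit, mylsm_post_audit);
 */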
| linux-master | security/lsm_audit.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* inode.c - securityfs
*
* Copyright (C) 2005 Greg Kroah-Hartman <[email protected]>
*
* Based on fs/debugfs/inode.c which had the following copyright notice:
* Copyright (C) 2004 Greg Kroah-Hartman <[email protected]>
* Copyright (C) 2004 IBM Inc.
*/
/* #define DEBUG */
#include <linux/sysfs.h>
#include <linux/kobject.h>
#include <linux/fs.h>
#include <linux/fs_context.h>
#include <linux/mount.h>
#include <linux/pagemap.h>
#include <linux/init.h>
#include <linux/namei.h>
#include <linux/security.h>
#include <linux/lsm_hooks.h>
#include <linux/magic.h>
static struct vfsmount *mount;
static int mount_count;
static void securityfs_free_inode(struct inode *inode)
{
if (S_ISLNK(inode->i_mode))
kfree(inode->i_link);
free_inode_nonrcu(inode);
}
static const struct super_operations securityfs_super_operations = {
.statfs = simple_statfs,
.free_inode = securityfs_free_inode,
};
static int securityfs_fill_super(struct super_block *sb, struct fs_context *fc)
{
static const struct tree_descr files[] = {{""}};
int error;
error = simple_fill_super(sb, SECURITYFS_MAGIC, files);
if (error)
return error;
sb->s_op = &securityfs_super_operations;
return 0;
}
static int securityfs_get_tree(struct fs_context *fc)
{
return get_tree_single(fc, securityfs_fill_super);
}
static const struct fs_context_operations securityfs_context_ops = {
.get_tree = securityfs_get_tree,
};
static int securityfs_init_fs_context(struct fs_context *fc)
{
fc->ops = &securityfs_context_ops;
return 0;
}
static struct file_system_type fs_type = {
.owner = THIS_MODULE,
.name = "securityfs",
.init_fs_context = securityfs_init_fs_context,
.kill_sb = kill_litter_super,
};
/**
* securityfs_create_dentry - create a dentry in the securityfs filesystem
*
* @name: a pointer to a string containing the name of the file to create.
* @mode: the permission that the file should have
* @parent: a pointer to the parent dentry for this file. This should be a
* directory dentry if set. If this parameter is %NULL, then the
* file will be created in the root of the securityfs filesystem.
* @data: a pointer to something that the caller will want to get to later
* on. The inode.i_private pointer will point to this value on
* the open() call.
* @fops: a pointer to a struct file_operations that should be used for
* this file.
* @iops: a pointer to a struct inode_operations that should be used for
* this file/dir
*
* This is the basic "create a file/dir/symlink" function for
* securityfs. It allows for a wide range of flexibility in creating
* a file, or a directory (if you want to create a directory, the
* securityfs_create_dir() function is recommended to be used
* instead).
*
* This function returns a pointer to a dentry if it succeeds. This
* pointer must be passed to the securityfs_remove() function when the
* file is to be removed (no automatic cleanup happens if your module
* is unloaded, you are responsible here). If an error occurs, the
* function will return the error value (via ERR_PTR).
*
* If securityfs is not enabled in the kernel, the value %-ENODEV is
* returned.
*/
static struct dentry *securityfs_create_dentry(const char *name, umode_t mode,
struct dentry *parent, void *data,
const struct file_operations *fops,
const struct inode_operations *iops)
{
struct dentry *dentry;
struct inode *dir, *inode;
int error;
if (!(mode & S_IFMT))
mode = (mode & S_IALLUGO) | S_IFREG;
pr_debug("securityfs: creating file '%s'\n",name);
error = simple_pin_fs(&fs_type, &mount, &mount_count);
if (error)
return ERR_PTR(error);
if (!parent)
parent = mount->mnt_root;
dir = d_inode(parent);
inode_lock(dir);
dentry = lookup_one_len(name, parent, strlen(name));
if (IS_ERR(dentry))
goto out;
if (d_really_is_positive(dentry)) {
error = -EEXIST;
goto out1;
}
inode = new_inode(dir->i_sb);
if (!inode) {
error = -ENOMEM;
goto out1;
}
inode->i_ino = get_next_ino();
inode->i_mode = mode;
inode->i_atime = inode->i_mtime = inode_set_ctime_current(inode);
inode->i_private = data;
if (S_ISDIR(mode)) {
inode->i_op = &simple_dir_inode_operations;
inode->i_fop = &simple_dir_operations;
inc_nlink(inode);
inc_nlink(dir);
} else if (S_ISLNK(mode)) {
inode->i_op = iops ? iops : &simple_symlink_inode_operations;
inode->i_link = data;
} else {
inode->i_fop = fops;
}
d_instantiate(dentry, inode);
dget(dentry);
inode_unlock(dir);
return dentry;
out1:
dput(dentry);
dentry = ERR_PTR(error);
out:
inode_unlock(dir);
simple_release_fs(&mount, &mount_count);
return dentry;
}
/**
* securityfs_create_file - create a file in the securityfs filesystem
*
* @name: a pointer to a string containing the name of the file to create.
* @mode: the permission that the file should have
* @parent: a pointer to the parent dentry for this file. This should be a
* directory dentry if set. If this parameter is %NULL, then the
* file will be created in the root of the securityfs filesystem.
* @data: a pointer to something that the caller will want to get to later
* on. The inode.i_private pointer will point to this value on
* the open() call.
* @fops: a pointer to a struct file_operations that should be used for
* this file.
*
* This function creates a file in securityfs with the given @name.
*
* This function returns a pointer to a dentry if it succeeds. This
* pointer must be passed to the securityfs_remove() function when the file is
* to be removed (no automatic cleanup happens if your module is unloaded,
* you are responsible here). If an error occurs, the function will return
* the error value (via ERR_PTR).
*
* If securityfs is not enabled in the kernel, the value %-ENODEV is
* returned.
*/
struct dentry *securityfs_create_file(const char *name, umode_t mode,
struct dentry *parent, void *data,
const struct file_operations *fops)
{
return securityfs_create_dentry(name, mode, parent, data, fops, NULL);
}
EXPORT_SYMBOL_GPL(securityfs_create_file);
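/*
 * Illustrative sketch (not part of this file): creating a read-only file
 * under securityfs, mirroring the "lsm" file at the bottom of this file.
 * The identifiers example_dentry/example_read/example_buf are placeholders
 * used only in this example.
 *
 *	static ssize_t example_read(struct file *filp, char __user *buf,
 *				    size_t count, loff_t *ppos)
 *	{
 *		return simple_read_from_buffer(buf, count, ppos, example_buf,
 *					       strlen(example_buf));
 *	}
 *
 *	static const struct file_operations example_ops = {
 *		.read = example_read,
 *		.llseek = generic_file_llseek,
 *	};
 *
 *	example_dentry = securityfs_create_file("example", 0444, NULL, NULL,
 *						&example_ops);
 *	if (IS_ERR(example_dentry))
 *		return PTR_ERR(example_dentry);
 */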
/**
* securityfs_create_dir - create a directory in the securityfs filesystem
*
* @name: a pointer to a string containing the name of the directory to
* create.
* @parent: a pointer to the parent dentry for this file. This should be a
* directory dentry if set. If this parameter is %NULL, then the
* directory will be created in the root of the securityfs filesystem.
*
* This function creates a directory in securityfs with the given @name.
*
* This function returns a pointer to a dentry if it succeeds. This
* pointer must be passed to the securityfs_remove() function when the file is
* to be removed (no automatic cleanup happens if your module is unloaded,
* you are responsible here). If an error occurs, the function will return
* the error value (via ERR_PTR).
*
* If securityfs is not enabled in the kernel, the value %-ENODEV is
* returned.
*/
struct dentry *securityfs_create_dir(const char *name, struct dentry *parent)
{
return securityfs_create_file(name, S_IFDIR | 0755, parent, NULL, NULL);
}
EXPORT_SYMBOL_GPL(securityfs_create_dir);
/**
* securityfs_create_symlink - create a symlink in the securityfs filesystem
*
* @name: a pointer to a string containing the name of the symlink to
* create.
* @parent: a pointer to the parent dentry for the symlink. This should be a
* directory dentry if set. If this parameter is %NULL, then the
* symlink will be created in the root of the securityfs filesystem.
* @target: a pointer to a string containing the name of the symlink's target.
* If this parameter is %NULL, then the @iops parameter needs to be
* set up to handle .readlink and .get_link inode_operations.
* @iops: a pointer to the struct inode_operations to use for the symlink. If
* this parameter is %NULL, then the default simple_symlink_inode
* operations will be used.
*
* This function creates a symlink in securityfs with the given @name.
*
* This function returns a pointer to a dentry if it succeeds. This
* pointer must be passed to the securityfs_remove() function when the file is
* to be removed (no automatic cleanup happens if your module is unloaded,
* you are responsible here). If an error occurs, the function will return
* the error value (via ERR_PTR).
*
* If securityfs is not enabled in the kernel, the value %-ENODEV is
* returned.
*/
struct dentry *securityfs_create_symlink(const char *name,
struct dentry *parent,
const char *target,
const struct inode_operations *iops)
{
struct dentry *dent;
char *link = NULL;
if (target) {
link = kstrdup(target, GFP_KERNEL);
if (!link)
return ERR_PTR(-ENOMEM);
}
dent = securityfs_create_dentry(name, S_IFLNK | 0444, parent,
link, NULL, iops);
if (IS_ERR(dent))
kfree(link);
return dent;
}
EXPORT_SYMBOL_GPL(securityfs_create_symlink);
/**
* securityfs_remove - removes a file or directory from the securityfs filesystem
*
* @dentry: a pointer to the dentry of the file or directory to be removed.
*
* This function removes a file or directory in securityfs that was previously
* created with a call to another securityfs function (like
* securityfs_create_file() or variants thereof.)
*
* This function is required to be called in order for the file to be
* removed. No automatic cleanup of files will happen when a module is
* removed; you are responsible here.
*/
void securityfs_remove(struct dentry *dentry)
{
struct inode *dir;
if (!dentry || IS_ERR(dentry))
return;
dir = d_inode(dentry->d_parent);
inode_lock(dir);
if (simple_positive(dentry)) {
if (d_is_dir(dentry))
simple_rmdir(dir, dentry);
else
simple_unlink(dir, dentry);
dput(dentry);
}
inode_unlock(dir);
simple_release_fs(&mount, &mount_count);
}
EXPORT_SYMBOL_GPL(securityfs_remove);
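/*
 * Illustrative sketch (not part of this file): a directory with a symlink
 * inside it, and the matching teardown. Error handling is elided and all
 * identifiers are placeholders for this example only.
 *
 *	dir = securityfs_create_dir("example_dir", NULL);
 *	link = securityfs_create_symlink("alias", dir, "example_dir/target",
 *					 NULL);
 *	...
 *	securityfs_remove(link);
 *	securityfs_remove(dir);
 */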
#ifdef CONFIG_SECURITY
static struct dentry *lsm_dentry;
static ssize_t lsm_read(struct file *filp, char __user *buf, size_t count,
loff_t *ppos)
{
return simple_read_from_buffer(buf, count, ppos, lsm_names,
strlen(lsm_names));
}
static const struct file_operations lsm_ops = {
.read = lsm_read,
.llseek = generic_file_llseek,
};
#endif
static int __init securityfs_init(void)
{
int retval;
retval = sysfs_create_mount_point(kernel_kobj, "security");
if (retval)
return retval;
retval = register_filesystem(&fs_type);
if (retval) {
sysfs_remove_mount_point(kernel_kobj, "security");
return retval;
}
#ifdef CONFIG_SECURITY
lsm_dentry = securityfs_create_file("lsm", 0444, NULL, NULL,
&lsm_ops);
#endif
return 0;
}
core_initcall(securityfs_init);
| linux-master | security/inode.c |
// SPDX-License-Identifier: GPL-2.0
/*
* device_cgroup.c - device cgroup subsystem
*
* Copyright 2007 IBM Corp
*/
#include <linux/bpf-cgroup.h>
#include <linux/device_cgroup.h>
#include <linux/cgroup.h>
#include <linux/ctype.h>
#include <linux/list.h>
#include <linux/uaccess.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/rcupdate.h>
#include <linux/mutex.h>
#ifdef CONFIG_CGROUP_DEVICE
static DEFINE_MUTEX(devcgroup_mutex);
enum devcg_behavior {
DEVCG_DEFAULT_NONE,
DEVCG_DEFAULT_ALLOW,
DEVCG_DEFAULT_DENY,
};
/*
* exception list locking rules:
* hold devcgroup_mutex for update/read.
* hold rcu_read_lock() for read.
*/
struct dev_exception_item {
u32 major, minor;
short type;
short access;
struct list_head list;
struct rcu_head rcu;
};
struct dev_cgroup {
struct cgroup_subsys_state css;
struct list_head exceptions;
enum devcg_behavior behavior;
};
static inline struct dev_cgroup *css_to_devcgroup(struct cgroup_subsys_state *s)
{
return s ? container_of(s, struct dev_cgroup, css) : NULL;
}
static inline struct dev_cgroup *task_devcgroup(struct task_struct *task)
{
return css_to_devcgroup(task_css(task, devices_cgrp_id));
}
/*
* called under devcgroup_mutex
*/
static int dev_exceptions_copy(struct list_head *dest, struct list_head *orig)
{
struct dev_exception_item *ex, *tmp, *new;
lockdep_assert_held(&devcgroup_mutex);
list_for_each_entry(ex, orig, list) {
new = kmemdup(ex, sizeof(*ex), GFP_KERNEL);
if (!new)
goto free_and_exit;
list_add_tail(&new->list, dest);
}
return 0;
free_and_exit:
list_for_each_entry_safe(ex, tmp, dest, list) {
list_del(&ex->list);
kfree(ex);
}
return -ENOMEM;
}
static void dev_exceptions_move(struct list_head *dest, struct list_head *orig)
{
struct dev_exception_item *ex, *tmp;
lockdep_assert_held(&devcgroup_mutex);
list_for_each_entry_safe(ex, tmp, orig, list) {
list_move_tail(&ex->list, dest);
}
}
/*
* called under devcgroup_mutex
*/
static int dev_exception_add(struct dev_cgroup *dev_cgroup,
struct dev_exception_item *ex)
{
struct dev_exception_item *excopy, *walk;
lockdep_assert_held(&devcgroup_mutex);
excopy = kmemdup(ex, sizeof(*ex), GFP_KERNEL);
if (!excopy)
return -ENOMEM;
list_for_each_entry(walk, &dev_cgroup->exceptions, list) {
if (walk->type != ex->type)
continue;
if (walk->major != ex->major)
continue;
if (walk->minor != ex->minor)
continue;
walk->access |= ex->access;
kfree(excopy);
excopy = NULL;
}
if (excopy != NULL)
list_add_tail_rcu(&excopy->list, &dev_cgroup->exceptions);
return 0;
}
/*
* called under devcgroup_mutex
*/
static void dev_exception_rm(struct dev_cgroup *dev_cgroup,
struct dev_exception_item *ex)
{
struct dev_exception_item *walk, *tmp;
lockdep_assert_held(&devcgroup_mutex);
list_for_each_entry_safe(walk, tmp, &dev_cgroup->exceptions, list) {
if (walk->type != ex->type)
continue;
if (walk->major != ex->major)
continue;
if (walk->minor != ex->minor)
continue;
walk->access &= ~ex->access;
if (!walk->access) {
list_del_rcu(&walk->list);
kfree_rcu(walk, rcu);
}
}
}
static void __dev_exception_clean(struct dev_cgroup *dev_cgroup)
{
struct dev_exception_item *ex, *tmp;
list_for_each_entry_safe(ex, tmp, &dev_cgroup->exceptions, list) {
list_del_rcu(&ex->list);
kfree_rcu(ex, rcu);
}
}
/**
* dev_exception_clean - frees all entries of the exception list
* @dev_cgroup: dev_cgroup with the exception list to be cleaned
*
* called under devcgroup_mutex
*/
static void dev_exception_clean(struct dev_cgroup *dev_cgroup)
{
lockdep_assert_held(&devcgroup_mutex);
__dev_exception_clean(dev_cgroup);
}
static inline bool is_devcg_online(const struct dev_cgroup *devcg)
{
return (devcg->behavior != DEVCG_DEFAULT_NONE);
}
/**
* devcgroup_online - initializes devcgroup's behavior and exceptions based on
* parent's
* @css: css getting online
* returns 0 in case of success, error code otherwise
*/
static int devcgroup_online(struct cgroup_subsys_state *css)
{
struct dev_cgroup *dev_cgroup = css_to_devcgroup(css);
struct dev_cgroup *parent_dev_cgroup = css_to_devcgroup(css->parent);
int ret = 0;
mutex_lock(&devcgroup_mutex);
if (parent_dev_cgroup == NULL)
dev_cgroup->behavior = DEVCG_DEFAULT_ALLOW;
else {
ret = dev_exceptions_copy(&dev_cgroup->exceptions,
&parent_dev_cgroup->exceptions);
if (!ret)
dev_cgroup->behavior = parent_dev_cgroup->behavior;
}
mutex_unlock(&devcgroup_mutex);
return ret;
}
static void devcgroup_offline(struct cgroup_subsys_state *css)
{
struct dev_cgroup *dev_cgroup = css_to_devcgroup(css);
mutex_lock(&devcgroup_mutex);
dev_cgroup->behavior = DEVCG_DEFAULT_NONE;
mutex_unlock(&devcgroup_mutex);
}
/*
* called from kernel/cgroup/cgroup.c with cgroup_lock() held.
*/
static struct cgroup_subsys_state *
devcgroup_css_alloc(struct cgroup_subsys_state *parent_css)
{
struct dev_cgroup *dev_cgroup;
dev_cgroup = kzalloc(sizeof(*dev_cgroup), GFP_KERNEL);
if (!dev_cgroup)
return ERR_PTR(-ENOMEM);
INIT_LIST_HEAD(&dev_cgroup->exceptions);
dev_cgroup->behavior = DEVCG_DEFAULT_NONE;
return &dev_cgroup->css;
}
static void devcgroup_css_free(struct cgroup_subsys_state *css)
{
struct dev_cgroup *dev_cgroup = css_to_devcgroup(css);
__dev_exception_clean(dev_cgroup);
kfree(dev_cgroup);
}
#define DEVCG_ALLOW 1
#define DEVCG_DENY 2
#define DEVCG_LIST 3
#define MAJMINLEN 13
#define ACCLEN 4
static void set_access(char *acc, short access)
{
int idx = 0;
memset(acc, 0, ACCLEN);
if (access & DEVCG_ACC_READ)
acc[idx++] = 'r';
if (access & DEVCG_ACC_WRITE)
acc[idx++] = 'w';
if (access & DEVCG_ACC_MKNOD)
acc[idx++] = 'm';
}
static char type_to_char(short type)
{
if (type == DEVCG_DEV_ALL)
return 'a';
if (type == DEVCG_DEV_CHAR)
return 'c';
if (type == DEVCG_DEV_BLOCK)
return 'b';
return 'X';
}
static void set_majmin(char *str, unsigned m)
{
if (m == ~0)
strcpy(str, "*");
else
sprintf(str, "%u", m);
}
static int devcgroup_seq_show(struct seq_file *m, void *v)
{
struct dev_cgroup *devcgroup = css_to_devcgroup(seq_css(m));
struct dev_exception_item *ex;
char maj[MAJMINLEN], min[MAJMINLEN], acc[ACCLEN];
rcu_read_lock();
/*
* To preserve compatibility:
* - Only show the "all devices" when the default policy is to allow
* - List the exceptions in case the default policy is to deny
* This way, the file remains as a "whitelist of devices"
*/
if (devcgroup->behavior == DEVCG_DEFAULT_ALLOW) {
set_access(acc, DEVCG_ACC_MASK);
set_majmin(maj, ~0);
set_majmin(min, ~0);
seq_printf(m, "%c %s:%s %s\n", type_to_char(DEVCG_DEV_ALL),
maj, min, acc);
} else {
list_for_each_entry_rcu(ex, &devcgroup->exceptions, list) {
set_access(acc, ex->access);
set_majmin(maj, ex->major);
set_majmin(min, ex->minor);
seq_printf(m, "%c %s:%s %s\n", type_to_char(ex->type),
maj, min, acc);
}
}
rcu_read_unlock();
return 0;
}
/**
* match_exception - iterates the exception list trying to find a complete match
* @exceptions: list of exceptions
* @type: device type (DEVCG_DEV_BLOCK or DEVCG_DEV_CHAR)
* @major: device file major number, ~0 to match all
* @minor: device file minor number, ~0 to match all
* @access: permission mask (DEVCG_ACC_READ, DEVCG_ACC_WRITE, DEVCG_ACC_MKNOD)
*
* It is considered a complete match if an exception is found that will
* contain the entire range of provided parameters.
*
* Return: true in case it matches an exception completely
*/
static bool match_exception(struct list_head *exceptions, short type,
u32 major, u32 minor, short access)
{
struct dev_exception_item *ex;
list_for_each_entry_rcu(ex, exceptions, list) {
if ((type & DEVCG_DEV_BLOCK) && !(ex->type & DEVCG_DEV_BLOCK))
continue;
if ((type & DEVCG_DEV_CHAR) && !(ex->type & DEVCG_DEV_CHAR))
continue;
if (ex->major != ~0 && ex->major != major)
continue;
if (ex->minor != ~0 && ex->minor != minor)
continue;
/* provided access cannot have more than the exception rule */
if (access & (~ex->access))
continue;
return true;
}
return false;
}
/**
* match_exception_partial - iterates the exception list trying to find a partial match
* @exceptions: list of exceptions
* @type: device type (DEVCG_DEV_BLOCK or DEVCG_DEV_CHAR)
* @major: device file major number, ~0 to match all
* @minor: device file minor number, ~0 to match all
* @access: permission mask (DEVCG_ACC_READ, DEVCG_ACC_WRITE, DEVCG_ACC_MKNOD)
*
* It is considered a partial match if an exception's range is found to
* contain *any* of the devices specified by the provided parameters. This is
* used to make sure no extra access is being granted that is forbidden by
* any exception in the list.
*
* Return: true in case the provided range matches an exception, even partially
*/
static bool match_exception_partial(struct list_head *exceptions, short type,
u32 major, u32 minor, short access)
{
struct dev_exception_item *ex;
list_for_each_entry_rcu(ex, exceptions, list,
lockdep_is_held(&devcgroup_mutex)) {
if ((type & DEVCG_DEV_BLOCK) && !(ex->type & DEVCG_DEV_BLOCK))
continue;
if ((type & DEVCG_DEV_CHAR) && !(ex->type & DEVCG_DEV_CHAR))
continue;
/*
* We must be sure that both the exception and the provided
* range aren't masking all devices
*/
if (ex->major != ~0 && major != ~0 && ex->major != major)
continue;
if (ex->minor != ~0 && minor != ~0 && ex->minor != minor)
continue;
/*
* In order to make sure the provided range isn't matching
* an exception, all its access bits shouldn't match the
* exception's access bits
*/
if (!(access & ex->access))
continue;
return true;
}
return false;
}
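/*
 * Worked example (illustrative only): with a single exception
 * "b 8:* rwm" in the list,
 *
 *	match_exception(..., DEVCG_DEV_BLOCK, 8, 1, DEVCG_ACC_READ)
 *
 * returns true because "block major 8, minor 1, read" is fully contained
 * in the exception, while
 *
 *	match_exception_partial(..., DEVCG_DEV_BLOCK, ~0, ~0, DEVCG_ACC_READ)
 *
 * also returns true because the "any block device, read" range overlaps
 * the exception even though it is not fully contained in it.
 */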
/**
* verify_new_ex - verifies if a new exception is allowed by parent cgroup's permissions
* @dev_cgroup: dev cgroup to be tested against
* @refex: new exception
* @behavior: behavior of the exception's dev_cgroup
*
* This is used to make sure a child cgroup won't have more privileges
* than its parent
*/
static bool verify_new_ex(struct dev_cgroup *dev_cgroup,
struct dev_exception_item *refex,
enum devcg_behavior behavior)
{
bool match = false;
RCU_LOCKDEP_WARN(!rcu_read_lock_held() &&
!lockdep_is_held(&devcgroup_mutex),
"device_cgroup:verify_new_ex called without proper synchronization");
if (dev_cgroup->behavior == DEVCG_DEFAULT_ALLOW) {
if (behavior == DEVCG_DEFAULT_ALLOW) {
/*
* new exception in the child doesn't matter, only
* adding extra restrictions
*/
return true;
} else {
/*
* new exception in the child will add more devices
* that can be accessed, so it can't match any of
* parent's exceptions, even slightly
*/
match = match_exception_partial(&dev_cgroup->exceptions,
refex->type,
refex->major,
refex->minor,
refex->access);
if (match)
return false;
return true;
}
} else {
/*
* Only behavior == DEVCG_DEFAULT_DENY allowed here, therefore
* the new exception will add access to more devices and must
* be contained completely in a parent's exception to be
* allowed
*/
match = match_exception(&dev_cgroup->exceptions, refex->type,
refex->major, refex->minor,
refex->access);
if (match)
/* parent has an exception that matches the proposed */
return true;
else
return false;
}
return false;
}
/*
* parent_has_perm:
* when adding a new allow rule to a device exception list, the rule
* must be allowed in the parent device
*/
static int parent_has_perm(struct dev_cgroup *childcg,
struct dev_exception_item *ex)
{
struct dev_cgroup *parent = css_to_devcgroup(childcg->css.parent);
if (!parent)
return 1;
return verify_new_ex(parent, ex, childcg->behavior);
}
/**
* parent_allows_removal - verify if it's ok to remove an exception
* @childcg: child cgroup from where the exception will be removed
* @ex: exception being removed
*
* When removing an exception in cgroups with default ALLOW policy, it must
* be checked if removing it will give the child cgroup more access than the
* parent.
*
* Return: true if it's ok to remove exception, false otherwise
*/
static bool parent_allows_removal(struct dev_cgroup *childcg,
struct dev_exception_item *ex)
{
struct dev_cgroup *parent = css_to_devcgroup(childcg->css.parent);
if (!parent)
return true;
/* It's always allowed to remove access to devices */
if (childcg->behavior == DEVCG_DEFAULT_DENY)
return true;
/*
* Make sure you're not removing part of or a whole exception existing in
* the parent cgroup
*/
return !match_exception_partial(&parent->exceptions, ex->type,
ex->major, ex->minor, ex->access);
}
/**
* may_allow_all - checks if it's possible to change the behavior to
* allow based on parent's rules.
* @parent: device cgroup's parent
* returns: != 0 in case it's allowed, 0 otherwise
*/
static inline int may_allow_all(struct dev_cgroup *parent)
{
if (!parent)
return 1;
return parent->behavior == DEVCG_DEFAULT_ALLOW;
}
/**
* revalidate_active_exceptions - walks through the active exception list and
* revalidates the exceptions based on parent's
* behavior and exceptions. The exceptions that
* are no longer valid will be removed.
* Called with devcgroup_mutex held.
* @devcg: cgroup which exceptions will be checked
*
* This is one of the three key functions for hierarchy implementation.
* This function is responsible for re-evaluating all the cgroup's active
* exceptions due to a parent's exception change.
* Refer to Documentation/admin-guide/cgroup-v1/devices.rst for more details.
*/
static void revalidate_active_exceptions(struct dev_cgroup *devcg)
{
struct dev_exception_item *ex;
struct list_head *this, *tmp;
list_for_each_safe(this, tmp, &devcg->exceptions) {
ex = container_of(this, struct dev_exception_item, list);
if (!parent_has_perm(devcg, ex))
dev_exception_rm(devcg, ex);
}
}
/**
* propagate_exception - propagates a new exception to the children
* @devcg_root: device cgroup that added a new exception
* @ex: new exception to be propagated
*
* returns: 0 in case of success, != 0 in case of error
*/
static int propagate_exception(struct dev_cgroup *devcg_root,
struct dev_exception_item *ex)
{
struct cgroup_subsys_state *pos;
int rc = 0;
rcu_read_lock();
css_for_each_descendant_pre(pos, &devcg_root->css) {
struct dev_cgroup *devcg = css_to_devcgroup(pos);
/*
* Because devcgroup_mutex is held, no devcg will become
* online or offline during the tree walk (see on/offline
* methods), and online ones are safe to access outside RCU
* read lock without bumping refcnt.
*/
if (pos == &devcg_root->css || !is_devcg_online(devcg))
continue;
rcu_read_unlock();
/*
* in case both the root's and the devcg's behavior is allow, a new
* restriction means adding to the exception list
*/
if (devcg_root->behavior == DEVCG_DEFAULT_ALLOW &&
devcg->behavior == DEVCG_DEFAULT_ALLOW) {
rc = dev_exception_add(devcg, ex);
if (rc)
return rc;
} else {
/*
* in the other possible cases:
* root's behavior: allow, devcg's: deny
* root's behavior: deny, devcg's: deny
* the exception will be removed
*/
dev_exception_rm(devcg, ex);
}
revalidate_active_exceptions(devcg);
rcu_read_lock();
}
rcu_read_unlock();
return rc;
}
/*
* Modify the exception list using allow/deny rules.
* CAP_SYS_ADMIN is needed for this. It's at least separate from CAP_MKNOD
* so we can give a container CAP_MKNOD to let it create devices but not
* modify the exception list.
* It seems likely we'll want to add a CAP_CONTAINER capability to allow
* us to also grant CAP_SYS_ADMIN to containers without giving away the
* device exception list controls, but for now we'll stick with CAP_SYS_ADMIN
*
* Taking rules away is always allowed (given CAP_SYS_ADMIN). Granting
* new access is only allowed if you're in the top-level cgroup, or your
* parent cgroup has the access you're asking for.
*/
static int devcgroup_update_access(struct dev_cgroup *devcgroup,
int filetype, char *buffer)
{
const char *b;
char temp[12]; /* 11 + 1 characters needed for a u32 */
int count, rc = 0;
struct dev_exception_item ex;
struct dev_cgroup *parent = css_to_devcgroup(devcgroup->css.parent);
struct dev_cgroup tmp_devcgrp;
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
memset(&ex, 0, sizeof(ex));
memset(&tmp_devcgrp, 0, sizeof(tmp_devcgrp));
b = buffer;
switch (*b) {
case 'a':
switch (filetype) {
case DEVCG_ALLOW:
if (css_has_online_children(&devcgroup->css))
return -EINVAL;
if (!may_allow_all(parent))
return -EPERM;
if (!parent) {
devcgroup->behavior = DEVCG_DEFAULT_ALLOW;
dev_exception_clean(devcgroup);
break;
}
INIT_LIST_HEAD(&tmp_devcgrp.exceptions);
rc = dev_exceptions_copy(&tmp_devcgrp.exceptions,
&devcgroup->exceptions);
if (rc)
return rc;
dev_exception_clean(devcgroup);
rc = dev_exceptions_copy(&devcgroup->exceptions,
&parent->exceptions);
if (rc) {
dev_exceptions_move(&devcgroup->exceptions,
&tmp_devcgrp.exceptions);
return rc;
}
devcgroup->behavior = DEVCG_DEFAULT_ALLOW;
dev_exception_clean(&tmp_devcgrp);
break;
case DEVCG_DENY:
if (css_has_online_children(&devcgroup->css))
return -EINVAL;
dev_exception_clean(devcgroup);
devcgroup->behavior = DEVCG_DEFAULT_DENY;
break;
default:
return -EINVAL;
}
return 0;
case 'b':
ex.type = DEVCG_DEV_BLOCK;
break;
case 'c':
ex.type = DEVCG_DEV_CHAR;
break;
default:
return -EINVAL;
}
b++;
if (!isspace(*b))
return -EINVAL;
b++;
if (*b == '*') {
ex.major = ~0;
b++;
} else if (isdigit(*b)) {
memset(temp, 0, sizeof(temp));
for (count = 0; count < sizeof(temp) - 1; count++) {
temp[count] = *b;
b++;
if (!isdigit(*b))
break;
}
rc = kstrtou32(temp, 10, &ex.major);
if (rc)
return -EINVAL;
} else {
return -EINVAL;
}
if (*b != ':')
return -EINVAL;
b++;
/* read minor */
if (*b == '*') {
ex.minor = ~0;
b++;
} else if (isdigit(*b)) {
memset(temp, 0, sizeof(temp));
for (count = 0; count < sizeof(temp) - 1; count++) {
temp[count] = *b;
b++;
if (!isdigit(*b))
break;
}
rc = kstrtou32(temp, 10, &ex.minor);
if (rc)
return -EINVAL;
} else {
return -EINVAL;
}
if (!isspace(*b))
return -EINVAL;
for (b++, count = 0; count < 3; count++, b++) {
switch (*b) {
case 'r':
ex.access |= DEVCG_ACC_READ;
break;
case 'w':
ex.access |= DEVCG_ACC_WRITE;
break;
case 'm':
ex.access |= DEVCG_ACC_MKNOD;
break;
case '\n':
case '\0':
count = 3;
break;
default:
return -EINVAL;
}
}
switch (filetype) {
case DEVCG_ALLOW:
/*
* If the default policy is to allow, try to remove
* a matching exception instead. And be silent about it: we
* don't want to break compatibility
*/
if (devcgroup->behavior == DEVCG_DEFAULT_ALLOW) {
/* Check if the parent allows removing it first */
if (!parent_allows_removal(devcgroup, &ex))
return -EPERM;
dev_exception_rm(devcgroup, &ex);
break;
}
if (!parent_has_perm(devcgroup, &ex))
return -EPERM;
rc = dev_exception_add(devcgroup, &ex);
break;
case DEVCG_DENY:
/*
* If the default policy is to deny, try to remove
* a matching exception instead. And be silent about it: we
* don't want to break compatibility
*/
if (devcgroup->behavior == DEVCG_DEFAULT_DENY)
dev_exception_rm(devcgroup, &ex);
else
rc = dev_exception_add(devcgroup, &ex);
if (rc)
break;
/* we only propagate new restrictions */
rc = propagate_exception(devcgroup, &ex);
break;
default:
rc = -EINVAL;
}
return rc;
}
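/*
 * Illustrative rule strings accepted by the parser above (written to the
 * cgroup-v1 "devices.allow" / "devices.deny" files; the exact path depends
 * on where the hierarchy is mounted):
 *
 *	"a"		switch the default behavior (all devices)
 *	"c 1:3 mr"	character device major 1, minor 3, mknod + read
 *	"b 8:* rwm"	any minor of block major 8, full access
 *
 * e.g. from a shell:
 *
 *	echo "c 1:3 mr" > /sys/fs/cgroup/devices/<group>/devices.allow
 */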
static ssize_t devcgroup_access_write(struct kernfs_open_file *of,
char *buf, size_t nbytes, loff_t off)
{
int retval;
mutex_lock(&devcgroup_mutex);
retval = devcgroup_update_access(css_to_devcgroup(of_css(of)),
of_cft(of)->private, strstrip(buf));
mutex_unlock(&devcgroup_mutex);
return retval ?: nbytes;
}
static struct cftype dev_cgroup_files[] = {
{
.name = "allow",
.write = devcgroup_access_write,
.private = DEVCG_ALLOW,
},
{
.name = "deny",
.write = devcgroup_access_write,
.private = DEVCG_DENY,
},
{
.name = "list",
.seq_show = devcgroup_seq_show,
.private = DEVCG_LIST,
},
{ } /* terminate */
};
struct cgroup_subsys devices_cgrp_subsys = {
.css_alloc = devcgroup_css_alloc,
.css_free = devcgroup_css_free,
.css_online = devcgroup_online,
.css_offline = devcgroup_offline,
.legacy_cftypes = dev_cgroup_files,
};
/**
* devcgroup_legacy_check_permission - checks if an inode operation is permitted
* @type: device type
* @major: device major number
* @minor: device minor number
* @access: combination of DEVCG_ACC_WRITE, DEVCG_ACC_READ and DEVCG_ACC_MKNOD
*
* returns 0 on success, -EPERM case the operation is not permitted
*/
static int devcgroup_legacy_check_permission(short type, u32 major, u32 minor,
short access)
{
struct dev_cgroup *dev_cgroup;
bool rc;
rcu_read_lock();
dev_cgroup = task_devcgroup(current);
if (dev_cgroup->behavior == DEVCG_DEFAULT_ALLOW)
/* Can't match any of the exceptions, even partially */
rc = !match_exception_partial(&dev_cgroup->exceptions,
type, major, minor, access);
else
/* Need to match completely one exception to be allowed */
rc = match_exception(&dev_cgroup->exceptions, type, major,
minor, access);
rcu_read_unlock();
if (!rc)
return -EPERM;
return 0;
}
#endif /* CONFIG_CGROUP_DEVICE */
#if defined(CONFIG_CGROUP_DEVICE) || defined(CONFIG_CGROUP_BPF)
int devcgroup_check_permission(short type, u32 major, u32 minor, short access)
{
int rc = BPF_CGROUP_RUN_PROG_DEVICE_CGROUP(type, major, minor, access);
if (rc)
return rc;
#ifdef CONFIG_CGROUP_DEVICE
return devcgroup_legacy_check_permission(type, major, minor, access);
#else /* CONFIG_CGROUP_DEVICE */
return 0;
#endif /* CONFIG_CGROUP_DEVICE */
}
EXPORT_SYMBOL(devcgroup_check_permission);
#endif /* defined(CONFIG_CGROUP_DEVICE) || defined(CONFIG_CGROUP_BPF) */
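/*
 * Illustrative sketch (not part of this file): a typical caller checks the
 * current task's device cgroup before granting access to a device node.
 * The inode variable and the chosen access mask are placeholders for this
 * example only.
 *
 *	err = devcgroup_check_permission(DEVCG_DEV_CHAR, MAJOR(inode->i_rdev),
 *					 MINOR(inode->i_rdev), DEVCG_ACC_READ);
 *	if (err)
 *		return err;
 */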
| linux-master | security/device_cgroup.c |
// SPDX-License-Identifier: GPL-2.0
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/security.h>
#include <linux/sysctl.h>
/* amount of vm to protect from userspace access by both DAC and the LSM */
unsigned long mmap_min_addr;
/* amount of vm to protect from userspace using CAP_SYS_RAWIO (DAC) */
unsigned long dac_mmap_min_addr = CONFIG_DEFAULT_MMAP_MIN_ADDR;
/* amount of vm to protect from userspace using the LSM = CONFIG_LSM_MMAP_MIN_ADDR */
/*
* Update mmap_min_addr = max(dac_mmap_min_addr, CONFIG_LSM_MMAP_MIN_ADDR)
*/
static void update_mmap_min_addr(void)
{
#ifdef CONFIG_LSM_MMAP_MIN_ADDR
if (dac_mmap_min_addr > CONFIG_LSM_MMAP_MIN_ADDR)
mmap_min_addr = dac_mmap_min_addr;
else
mmap_min_addr = CONFIG_LSM_MMAP_MIN_ADDR;
#else
mmap_min_addr = dac_mmap_min_addr;
#endif
}
/*
* sysctl handler which just sets dac_mmap_min_addr = the new value and then
* calls update_mmap_min_addr() so non-MAP_FIXED hints get rounded properly
*/
int mmap_min_addr_handler(struct ctl_table *table, int write,
void *buffer, size_t *lenp, loff_t *ppos)
{
int ret;
if (write && !capable(CAP_SYS_RAWIO))
return -EPERM;
ret = proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
update_mmap_min_addr();
return ret;
}
static int __init init_mmap_min_addr(void)
{
update_mmap_min_addr();
return 0;
}
pure_initcall(init_mmap_min_addr);
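/*
 * Worked example (illustrative values only): with CONFIG_LSM_MMAP_MIN_ADDR
 * set to 65536 and CONFIG_DEFAULT_MMAP_MIN_ADDR set to 4096, mmap_min_addr
 * starts out as 65536. Writing a larger value, e.g.
 *
 *	echo 131072 > /proc/sys/vm/mmap_min_addr
 *
 * (which requires CAP_SYS_RAWIO) raises both dac_mmap_min_addr and the
 * effective mmap_min_addr to 131072.
 */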
| linux-master | security/min_addr.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Security plug functions
*
* Copyright (C) 2001 WireX Communications, Inc <[email protected]>
* Copyright (C) 2001-2002 Greg Kroah-Hartman <[email protected]>
* Copyright (C) 2001 Networks Associates Technology, Inc <[email protected]>
* Copyright (C) 2016 Mellanox Technologies
* Copyright (C) 2023 Microsoft Corporation <[email protected]>
*/
#define pr_fmt(fmt) "LSM: " fmt
#include <linux/bpf.h>
#include <linux/capability.h>
#include <linux/dcache.h>
#include <linux/export.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/kernel_read_file.h>
#include <linux/lsm_hooks.h>
#include <linux/integrity.h>
#include <linux/ima.h>
#include <linux/evm.h>
#include <linux/fsnotify.h>
#include <linux/mman.h>
#include <linux/mount.h>
#include <linux/personality.h>
#include <linux/backing-dev.h>
#include <linux/string.h>
#include <linux/msg.h>
#include <net/flow.h>
/* How many LSMs were built into the kernel? */
#define LSM_COUNT (__end_lsm_info - __start_lsm_info)
/*
* These are descriptions of the reasons that can be passed to the
* security_locked_down() LSM hook. Placing this array here allows
* all security modules to use the same descriptions for auditing
* purposes.
*/
const char *const lockdown_reasons[LOCKDOWN_CONFIDENTIALITY_MAX + 1] = {
[LOCKDOWN_NONE] = "none",
[LOCKDOWN_MODULE_SIGNATURE] = "unsigned module loading",
[LOCKDOWN_DEV_MEM] = "/dev/mem,kmem,port",
[LOCKDOWN_EFI_TEST] = "/dev/efi_test access",
[LOCKDOWN_KEXEC] = "kexec of unsigned images",
[LOCKDOWN_HIBERNATION] = "hibernation",
[LOCKDOWN_PCI_ACCESS] = "direct PCI access",
[LOCKDOWN_IOPORT] = "raw io port access",
[LOCKDOWN_MSR] = "raw MSR access",
[LOCKDOWN_ACPI_TABLES] = "modifying ACPI tables",
[LOCKDOWN_DEVICE_TREE] = "modifying device tree contents",
[LOCKDOWN_PCMCIA_CIS] = "direct PCMCIA CIS storage",
[LOCKDOWN_TIOCSSERIAL] = "reconfiguration of serial port IO",
[LOCKDOWN_MODULE_PARAMETERS] = "unsafe module parameters",
[LOCKDOWN_MMIOTRACE] = "unsafe mmio",
[LOCKDOWN_DEBUGFS] = "debugfs access",
[LOCKDOWN_XMON_WR] = "xmon write access",
[LOCKDOWN_BPF_WRITE_USER] = "use of bpf to write user RAM",
[LOCKDOWN_DBG_WRITE_KERNEL] = "use of kgdb/kdb to write kernel RAM",
[LOCKDOWN_RTAS_ERROR_INJECTION] = "RTAS error injection",
[LOCKDOWN_INTEGRITY_MAX] = "integrity",
[LOCKDOWN_KCORE] = "/proc/kcore access",
[LOCKDOWN_KPROBES] = "use of kprobes",
[LOCKDOWN_BPF_READ_KERNEL] = "use of bpf to read kernel RAM",
[LOCKDOWN_DBG_READ_KERNEL] = "use of kgdb/kdb to read kernel RAM",
[LOCKDOWN_PERF] = "unsafe use of perf",
[LOCKDOWN_TRACEFS] = "use of tracefs",
[LOCKDOWN_XMON_RW] = "xmon read and write access",
[LOCKDOWN_XFRM_SECRET] = "xfrm SA secret",
[LOCKDOWN_CONFIDENTIALITY_MAX] = "confidentiality",
};
struct security_hook_heads security_hook_heads __ro_after_init;
static BLOCKING_NOTIFIER_HEAD(blocking_lsm_notifier_chain);
static struct kmem_cache *lsm_file_cache;
static struct kmem_cache *lsm_inode_cache;
char *lsm_names;
static struct lsm_blob_sizes blob_sizes __ro_after_init;
/* Boot-time LSM user choice */
static __initdata const char *chosen_lsm_order;
static __initdata const char *chosen_major_lsm;
static __initconst const char *const builtin_lsm_order = CONFIG_LSM;
/* Ordered list of LSMs to initialize. */
static __initdata struct lsm_info **ordered_lsms;
static __initdata struct lsm_info *exclusive;
static __initdata bool debug;
#define init_debug(...) \
do { \
if (debug) \
pr_info(__VA_ARGS__); \
} while (0)
static bool __init is_enabled(struct lsm_info *lsm)
{
if (!lsm->enabled)
return false;
return *lsm->enabled;
}
/* Mark an LSM's enabled flag. */
static int lsm_enabled_true __initdata = 1;
static int lsm_enabled_false __initdata = 0;
static void __init set_enabled(struct lsm_info *lsm, bool enabled)
{
/*
* When an LSM hasn't configured an enable variable, we can use
* a hard-coded location for storing the default enabled state.
*/
if (!lsm->enabled) {
if (enabled)
lsm->enabled = &lsm_enabled_true;
else
lsm->enabled = &lsm_enabled_false;
} else if (lsm->enabled == &lsm_enabled_true) {
if (!enabled)
lsm->enabled = &lsm_enabled_false;
} else if (lsm->enabled == &lsm_enabled_false) {
if (enabled)
lsm->enabled = &lsm_enabled_true;
} else {
*lsm->enabled = enabled;
}
}
/* Is an LSM already listed in the ordered LSMs list? */
static bool __init exists_ordered_lsm(struct lsm_info *lsm)
{
struct lsm_info **check;
for (check = ordered_lsms; *check; check++)
if (*check == lsm)
return true;
return false;
}
/* Append an LSM to the list of ordered LSMs to initialize. */
static int last_lsm __initdata;
static void __init append_ordered_lsm(struct lsm_info *lsm, const char *from)
{
/* Ignore duplicate selections. */
if (exists_ordered_lsm(lsm))
return;
if (WARN(last_lsm == LSM_COUNT, "%s: out of LSM slots!?\n", from))
return;
/* Enable this LSM, if it is not already set. */
if (!lsm->enabled)
lsm->enabled = &lsm_enabled_true;
ordered_lsms[last_lsm++] = lsm;
init_debug("%s ordered: %s (%s)\n", from, lsm->name,
is_enabled(lsm) ? "enabled" : "disabled");
}
/* Is an LSM allowed to be initialized? */
static bool __init lsm_allowed(struct lsm_info *lsm)
{
/* Skip if the LSM is disabled. */
if (!is_enabled(lsm))
return false;
/* Not allowed if another exclusive LSM already initialized. */
if ((lsm->flags & LSM_FLAG_EXCLUSIVE) && exclusive) {
init_debug("exclusive disabled: %s\n", lsm->name);
return false;
}
return true;
}
static void __init lsm_set_blob_size(int *need, int *lbs)
{
int offset;
if (*need <= 0)
return;
offset = ALIGN(*lbs, sizeof(void *));
*lbs = offset + *need;
*need = offset;
}
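/*
 * Worked example (illustrative only): if two LSMs ask for 8 and 20 bytes
 * of per-cred data on a 64-bit kernel, the first call turns need=8 into
 * offset 0 with lbs_cred=8, and the second turns need=20 into offset 8
 * (already pointer aligned) with lbs_cred=28. Each module later finds its
 * slice at cred->security plus its recorded offset.
 */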
static void __init lsm_set_blob_sizes(struct lsm_blob_sizes *needed)
{
if (!needed)
return;
lsm_set_blob_size(&needed->lbs_cred, &blob_sizes.lbs_cred);
lsm_set_blob_size(&needed->lbs_file, &blob_sizes.lbs_file);
/*
* The inode blob gets an rcu_head in addition to
* what the modules might need.
*/
if (needed->lbs_inode && blob_sizes.lbs_inode == 0)
blob_sizes.lbs_inode = sizeof(struct rcu_head);
lsm_set_blob_size(&needed->lbs_inode, &blob_sizes.lbs_inode);
lsm_set_blob_size(&needed->lbs_ipc, &blob_sizes.lbs_ipc);
lsm_set_blob_size(&needed->lbs_msg_msg, &blob_sizes.lbs_msg_msg);
lsm_set_blob_size(&needed->lbs_superblock, &blob_sizes.lbs_superblock);
lsm_set_blob_size(&needed->lbs_task, &blob_sizes.lbs_task);
lsm_set_blob_size(&needed->lbs_xattr_count,
&blob_sizes.lbs_xattr_count);
}
/* Prepare LSM for initialization. */
static void __init prepare_lsm(struct lsm_info *lsm)
{
int enabled = lsm_allowed(lsm);
/* Record enablement (to handle any following exclusive LSMs). */
set_enabled(lsm, enabled);
/* If enabled, do pre-initialization work. */
if (enabled) {
if ((lsm->flags & LSM_FLAG_EXCLUSIVE) && !exclusive) {
exclusive = lsm;
init_debug("exclusive chosen: %s\n", lsm->name);
}
lsm_set_blob_sizes(lsm->blobs);
}
}
/* Initialize a given LSM, if it is enabled. */
static void __init initialize_lsm(struct lsm_info *lsm)
{
if (is_enabled(lsm)) {
int ret;
init_debug("initializing %s\n", lsm->name);
ret = lsm->init();
WARN(ret, "%s failed to initialize: %d\n", lsm->name, ret);
}
}
/* Populate ordered LSMs list from comma-separated LSM name list. */
static void __init ordered_lsm_parse(const char *order, const char *origin)
{
struct lsm_info *lsm;
char *sep, *name, *next;
/* LSM_ORDER_FIRST is always first. */
for (lsm = __start_lsm_info; lsm < __end_lsm_info; lsm++) {
if (lsm->order == LSM_ORDER_FIRST)
append_ordered_lsm(lsm, " first");
}
/* Process "security=", if given. */
if (chosen_major_lsm) {
struct lsm_info *major;
/*
* To match the original "security=" behavior, this
* explicitly does NOT fall back to another Legacy Major
* if the selected one was separately disabled: disable
* all non-matching Legacy Major LSMs.
*/
for (major = __start_lsm_info; major < __end_lsm_info;
major++) {
if ((major->flags & LSM_FLAG_LEGACY_MAJOR) &&
strcmp(major->name, chosen_major_lsm) != 0) {
set_enabled(major, false);
init_debug("security=%s disabled: %s (only one legacy major LSM)\n",
chosen_major_lsm, major->name);
}
}
}
sep = kstrdup(order, GFP_KERNEL);
next = sep;
/* Walk the list, looking for matching LSMs. */
while ((name = strsep(&next, ",")) != NULL) {
bool found = false;
for (lsm = __start_lsm_info; lsm < __end_lsm_info; lsm++) {
if (strcmp(lsm->name, name) == 0) {
if (lsm->order == LSM_ORDER_MUTABLE)
append_ordered_lsm(lsm, origin);
found = true;
}
}
if (!found)
init_debug("%s ignored: %s (not built into kernel)\n",
origin, name);
}
/* Process "security=", if given. */
if (chosen_major_lsm) {
for (lsm = __start_lsm_info; lsm < __end_lsm_info; lsm++) {
if (exists_ordered_lsm(lsm))
continue;
if (strcmp(lsm->name, chosen_major_lsm) == 0)
append_ordered_lsm(lsm, "security=");
}
}
/* LSM_ORDER_LAST is always last. */
for (lsm = __start_lsm_info; lsm < __end_lsm_info; lsm++) {
if (lsm->order == LSM_ORDER_LAST)
append_ordered_lsm(lsm, " last");
}
/* Disable all LSMs not in the ordered list. */
for (lsm = __start_lsm_info; lsm < __end_lsm_info; lsm++) {
if (exists_ordered_lsm(lsm))
continue;
set_enabled(lsm, false);
init_debug("%s skipped: %s (not in requested order)\n",
origin, lsm->name);
}
kfree(sep);
}
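/*
 * Illustrative example (module names are placeholders): booting with
 *
 *	lsm=landlock,yama,apparmor
 *
 * first appends any LSM_ORDER_FIRST modules, then the three named modules
 * in that order (provided they are built in and LSM_ORDER_MUTABLE), then
 * any LSM_ORDER_LAST modules, and finally disables every built-in LSM that
 * was not listed.
 */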
static void __init lsm_early_cred(struct cred *cred);
static void __init lsm_early_task(struct task_struct *task);
static int lsm_append(const char *new, char **result);
static void __init report_lsm_order(void)
{
struct lsm_info **lsm, *early;
int first = 0;
pr_info("initializing lsm=");
/* Report each enabled LSM name, comma separated. */
for (early = __start_early_lsm_info;
early < __end_early_lsm_info; early++)
if (is_enabled(early))
pr_cont("%s%s", first++ == 0 ? "" : ",", early->name);
for (lsm = ordered_lsms; *lsm; lsm++)
if (is_enabled(*lsm))
pr_cont("%s%s", first++ == 0 ? "" : ",", (*lsm)->name);
pr_cont("\n");
}
static void __init ordered_lsm_init(void)
{
struct lsm_info **lsm;
ordered_lsms = kcalloc(LSM_COUNT + 1, sizeof(*ordered_lsms),
GFP_KERNEL);
if (chosen_lsm_order) {
if (chosen_major_lsm) {
pr_warn("security=%s is ignored because it is superseded by lsm=%s\n",
chosen_major_lsm, chosen_lsm_order);
chosen_major_lsm = NULL;
}
ordered_lsm_parse(chosen_lsm_order, "cmdline");
} else
ordered_lsm_parse(builtin_lsm_order, "builtin");
for (lsm = ordered_lsms; *lsm; lsm++)
prepare_lsm(*lsm);
report_lsm_order();
init_debug("cred blob size = %d\n", blob_sizes.lbs_cred);
init_debug("file blob size = %d\n", blob_sizes.lbs_file);
init_debug("inode blob size = %d\n", blob_sizes.lbs_inode);
init_debug("ipc blob size = %d\n", blob_sizes.lbs_ipc);
init_debug("msg_msg blob size = %d\n", blob_sizes.lbs_msg_msg);
init_debug("superblock blob size = %d\n", blob_sizes.lbs_superblock);
init_debug("task blob size = %d\n", blob_sizes.lbs_task);
init_debug("xattr slots = %d\n", blob_sizes.lbs_xattr_count);
/*
* Create any kmem_caches needed for blobs
*/
if (blob_sizes.lbs_file)
lsm_file_cache = kmem_cache_create("lsm_file_cache",
blob_sizes.lbs_file, 0,
SLAB_PANIC, NULL);
if (blob_sizes.lbs_inode)
lsm_inode_cache = kmem_cache_create("lsm_inode_cache",
blob_sizes.lbs_inode, 0,
SLAB_PANIC, NULL);
lsm_early_cred((struct cred *) current->cred);
lsm_early_task(current);
for (lsm = ordered_lsms; *lsm; lsm++)
initialize_lsm(*lsm);
kfree(ordered_lsms);
}
int __init early_security_init(void)
{
struct lsm_info *lsm;
#define LSM_HOOK(RET, DEFAULT, NAME, ...) \
INIT_HLIST_HEAD(&security_hook_heads.NAME);
#include "linux/lsm_hook_defs.h"
#undef LSM_HOOK
for (lsm = __start_early_lsm_info; lsm < __end_early_lsm_info; lsm++) {
if (!lsm->enabled)
lsm->enabled = &lsm_enabled_true;
prepare_lsm(lsm);
initialize_lsm(lsm);
}
return 0;
}
/**
* security_init - initializes the security framework
*
* This should be called early in the kernel initialization sequence.
*/
int __init security_init(void)
{
struct lsm_info *lsm;
init_debug("legacy security=%s\n", chosen_major_lsm ? : " *unspecified*");
init_debug(" CONFIG_LSM=%s\n", builtin_lsm_order);
init_debug("boot arg lsm=%s\n", chosen_lsm_order ? : " *unspecified*");
/*
* Append the names of the early LSM modules now that kmalloc() is
* available
*/
for (lsm = __start_early_lsm_info; lsm < __end_early_lsm_info; lsm++) {
init_debug(" early started: %s (%s)\n", lsm->name,
is_enabled(lsm) ? "enabled" : "disabled");
if (lsm->enabled)
lsm_append(lsm->name, &lsm_names);
}
/* Load LSMs in specified order. */
ordered_lsm_init();
return 0;
}
/* Save user chosen LSM */
static int __init choose_major_lsm(char *str)
{
chosen_major_lsm = str;
return 1;
}
__setup("security=", choose_major_lsm);
/* Explicitly choose LSM initialization order. */
static int __init choose_lsm_order(char *str)
{
chosen_lsm_order = str;
return 1;
}
__setup("lsm=", choose_lsm_order);
/* Enable LSM order debugging. */
static int __init enable_debug(char *str)
{
debug = true;
return 1;
}
__setup("lsm.debug", enable_debug);
static bool match_last_lsm(const char *list, const char *lsm)
{
const char *last;
if (WARN_ON(!list || !lsm))
return false;
last = strrchr(list, ',');
if (last)
/* Pass the comma, strcmp() will check for '\0' */
last++;
else
last = list;
return !strcmp(last, lsm);
}
static int lsm_append(const char *new, char **result)
{
char *cp;
if (*result == NULL) {
*result = kstrdup(new, GFP_KERNEL);
if (*result == NULL)
return -ENOMEM;
} else {
/* Check if it is the last registered name */
if (match_last_lsm(*result, new))
return 0;
cp = kasprintf(GFP_KERNEL, "%s,%s", *result, new);
if (cp == NULL)
return -ENOMEM;
kfree(*result);
*result = cp;
}
return 0;
}
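/*
 * The comma-separated string assembled by lsm_append() is what userspace
 * later reads from the securityfs "lsm" file, typically mounted at
 * /sys/kernel/security/lsm. An illustrative (not prescriptive) value is:
 *
 *	capability,lockdown,yama,apparmor
 */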
/**
* security_add_hooks - Add a modules hooks to the hook lists.
* @hooks: the hooks to add
* @count: the number of hooks to add
* @lsm: the name of the security module
*
* Each LSM has to register its hooks with the infrastructure.
*/
void __init security_add_hooks(struct security_hook_list *hooks, int count,
const char *lsm)
{
int i;
for (i = 0; i < count; i++) {
hooks[i].lsm = lsm;
hlist_add_tail_rcu(&hooks[i].list, hooks[i].head);
}
/*
* Don't try to append during early_security_init(), we'll come back
* and fix this up afterwards.
*/
if (slab_is_available()) {
if (lsm_append(lsm, &lsm_names) < 0)
panic("%s - Cannot get early memory.\n", __func__);
}
}
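/*
 * A minimal registration sketch for the API above. "example" and the
 * example_* identifiers are hypothetical; LSM_HOOK_INIT() and DEFINE_LSM()
 * come from <linux/lsm_hooks.h>:
 *
 *	static int example_file_permission(struct file *file, int mask)
 *	{
 *		return 0;	(permit everything in this sketch)
 *	}
 *
 *	static struct security_hook_list example_hooks[] __ro_after_init = {
 *		LSM_HOOK_INIT(file_permission, example_file_permission),
 *	};
 *
 *	static int __init example_lsm_init(void)
 *	{
 *		security_add_hooks(example_hooks, ARRAY_SIZE(example_hooks),
 *				   "example");
 *		return 0;
 *	}
 *
 *	DEFINE_LSM(example) = {
 *		.name = "example",
 *		.init = example_lsm_init,
 *	};
 */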
int call_blocking_lsm_notifier(enum lsm_event event, void *data)
{
return blocking_notifier_call_chain(&blocking_lsm_notifier_chain,
event, data);
}
EXPORT_SYMBOL(call_blocking_lsm_notifier);
int register_blocking_lsm_notifier(struct notifier_block *nb)
{
return blocking_notifier_chain_register(&blocking_lsm_notifier_chain,
nb);
}
EXPORT_SYMBOL(register_blocking_lsm_notifier);
int unregister_blocking_lsm_notifier(struct notifier_block *nb)
{
return blocking_notifier_chain_unregister(&blocking_lsm_notifier_chain,
nb);
}
EXPORT_SYMBOL(unregister_blocking_lsm_notifier);
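/*
 * Hedged usage sketch for the notifier interface above; "example_lsm_notify"
 * and "example_nb" are hypothetical names:
 *
 *	static int example_lsm_notify(struct notifier_block *nb,
 *				      unsigned long event, void *data)
 *	{
 *		if (event == LSM_POLICY_CHANGE)
 *			pr_info("an LSM reported a policy change\n");
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block example_nb = {
 *		.notifier_call = example_lsm_notify,
 *	};
 *
 *	register_blocking_lsm_notifier(&example_nb);
 */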
/**
* lsm_cred_alloc - allocate a composite cred blob
* @cred: the cred that needs a blob
* @gfp: allocation type
*
* Allocate the cred blob for all the modules
*
* Returns 0, or -ENOMEM if memory can't be allocated.
*/
static int lsm_cred_alloc(struct cred *cred, gfp_t gfp)
{
if (blob_sizes.lbs_cred == 0) {
cred->security = NULL;
return 0;
}
cred->security = kzalloc(blob_sizes.lbs_cred, gfp);
if (cred->security == NULL)
return -ENOMEM;
return 0;
}
/**
* lsm_early_cred - during initialization allocate a composite cred blob
* @cred: the cred that needs a blob
*
* Allocate the cred blob for all the modules
*/
static void __init lsm_early_cred(struct cred *cred)
{
int rc = lsm_cred_alloc(cred, GFP_KERNEL);
if (rc)
panic("%s: Early cred alloc failed.\n", __func__);
}
/**
* lsm_file_alloc - allocate a composite file blob
* @file: the file that needs a blob
*
* Allocate the file blob for all the modules
*
* Returns 0, or -ENOMEM if memory can't be allocated.
*/
static int lsm_file_alloc(struct file *file)
{
if (!lsm_file_cache) {
file->f_security = NULL;
return 0;
}
file->f_security = kmem_cache_zalloc(lsm_file_cache, GFP_KERNEL);
if (file->f_security == NULL)
return -ENOMEM;
return 0;
}
/**
* lsm_inode_alloc - allocate a composite inode blob
* @inode: the inode that needs a blob
*
* Allocate the inode blob for all the modules
*
* Returns 0, or -ENOMEM if memory can't be allocated.
*/
int lsm_inode_alloc(struct inode *inode)
{
if (!lsm_inode_cache) {
inode->i_security = NULL;
return 0;
}
inode->i_security = kmem_cache_zalloc(lsm_inode_cache, GFP_NOFS);
if (inode->i_security == NULL)
return -ENOMEM;
return 0;
}
/**
* lsm_task_alloc - allocate a composite task blob
* @task: the task that needs a blob
*
* Allocate the task blob for all the modules
*
* Returns 0, or -ENOMEM if memory can't be allocated.
*/
static int lsm_task_alloc(struct task_struct *task)
{
if (blob_sizes.lbs_task == 0) {
task->security = NULL;
return 0;
}
task->security = kzalloc(blob_sizes.lbs_task, GFP_KERNEL);
if (task->security == NULL)
return -ENOMEM;
return 0;
}
/**
* lsm_ipc_alloc - allocate a composite ipc blob
* @kip: the ipc that needs a blob
*
* Allocate the ipc blob for all the modules
*
* Returns 0, or -ENOMEM if memory can't be allocated.
*/
static int lsm_ipc_alloc(struct kern_ipc_perm *kip)
{
if (blob_sizes.lbs_ipc == 0) {
kip->security = NULL;
return 0;
}
kip->security = kzalloc(blob_sizes.lbs_ipc, GFP_KERNEL);
if (kip->security == NULL)
return -ENOMEM;
return 0;
}
/**
* lsm_msg_msg_alloc - allocate a composite msg_msg blob
* @mp: the msg_msg that needs a blob
*
* Allocate the ipc blob for all the modules
*
* Returns 0, or -ENOMEM if memory can't be allocated.
*/
static int lsm_msg_msg_alloc(struct msg_msg *mp)
{
if (blob_sizes.lbs_msg_msg == 0) {
mp->security = NULL;
return 0;
}
mp->security = kzalloc(blob_sizes.lbs_msg_msg, GFP_KERNEL);
if (mp->security == NULL)
return -ENOMEM;
return 0;
}
/**
* lsm_early_task - during initialization allocate a composite task blob
* @task: the task that needs a blob
*
* Allocate the task blob for all the modules
*/
static void __init lsm_early_task(struct task_struct *task)
{
int rc = lsm_task_alloc(task);
if (rc)
panic("%s: Early task alloc failed.\n", __func__);
}
/**
* lsm_superblock_alloc - allocate a composite superblock blob
* @sb: the superblock that needs a blob
*
* Allocate the superblock blob for all the modules
*
* Returns 0, or -ENOMEM if memory can't be allocated.
*/
static int lsm_superblock_alloc(struct super_block *sb)
{
if (blob_sizes.lbs_superblock == 0) {
sb->s_security = NULL;
return 0;
}
sb->s_security = kzalloc(blob_sizes.lbs_superblock, GFP_KERNEL);
if (sb->s_security == NULL)
return -ENOMEM;
return 0;
}
/*
* The default value of the LSM hook is defined in linux/lsm_hook_defs.h and
* can be accessed with:
*
* LSM_RET_DEFAULT(<hook_name>)
*
* The macros below define static constants for the default value of each
* LSM hook.
*/
#define LSM_RET_DEFAULT(NAME) (NAME##_default)
#define DECLARE_LSM_RET_DEFAULT_void(DEFAULT, NAME)
#define DECLARE_LSM_RET_DEFAULT_int(DEFAULT, NAME) \
static const int __maybe_unused LSM_RET_DEFAULT(NAME) = (DEFAULT);
#define LSM_HOOK(RET, DEFAULT, NAME, ...) \
DECLARE_LSM_RET_DEFAULT_##RET(DEFAULT, NAME)
#include <linux/lsm_hook_defs.h>
#undef LSM_HOOK
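/*
 * Illustrative expansion (a sketch, based on the declarations in
 * linux/lsm_hook_defs.h at the time of writing): a hook declared there as
 *
 *	LSM_HOOK(int, -EOPNOTSUPP, inode_getsecurity, ...)
 *
 * results in
 *
 *	static const int __maybe_unused inode_getsecurity_default = -EOPNOTSUPP;
 *
 * so LSM_RET_DEFAULT(inode_getsecurity) names that hook's default return
 * value, as used by several hook callers below. Hooks returning void get no
 * such constant.
 */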
/*
* Hook list operation macros.
*
* call_void_hook:
* This is a hook that does not return a value.
*
* call_int_hook:
* This is a hook that returns a value.
*/
#define call_void_hook(FUNC, ...) \
do { \
struct security_hook_list *P; \
\
hlist_for_each_entry(P, &security_hook_heads.FUNC, list) \
P->hook.FUNC(__VA_ARGS__); \
} while (0)
#define call_int_hook(FUNC, IRC, ...) ({ \
int RC = IRC; \
do { \
struct security_hook_list *P; \
\
hlist_for_each_entry(P, &security_hook_heads.FUNC, list) { \
RC = P->hook.FUNC(__VA_ARGS__); \
if (RC != 0) \
break; \
} \
} while (0); \
RC; \
})
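/*
 * Illustrative expansion (a sketch, not generated code): a call such as
 * call_int_hook(quota_on, 0, dentry) behaves roughly like
 *
 *	int rc = 0;
 *	struct security_hook_list *p;
 *
 *	hlist_for_each_entry(p, &security_hook_heads.quota_on, list) {
 *		rc = p->hook.quota_on(dentry);
 *		if (rc)
 *			break;
 *	}
 *
 * i.e. every registered LSM is consulted in registration order and the first
 * non-zero return value short-circuits the walk; call_void_hook() simply
 * calls every registered hook.
 */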
/* Security operations */
/**
* security_binder_set_context_mgr() - Check if becoming binder ctx mgr is ok
* @mgr: task credentials of current binder process
*
* Check whether @mgr is allowed to be the binder context manager.
*
* Return: Return 0 if permission is granted.
*/
int security_binder_set_context_mgr(const struct cred *mgr)
{
return call_int_hook(binder_set_context_mgr, 0, mgr);
}
/**
* security_binder_transaction() - Check if a binder transaction is allowed
* @from: sending process
* @to: receiving process
*
* Check whether @from is allowed to invoke a binder transaction call to @to.
*
* Return: Returns 0 if permission is granted.
*/
int security_binder_transaction(const struct cred *from,
const struct cred *to)
{
return call_int_hook(binder_transaction, 0, from, to);
}
/**
* security_binder_transfer_binder() - Check if a binder transfer is allowed
* @from: sending process
* @to: receiving process
*
* Check whether @from is allowed to transfer a binder reference to @to.
*
* Return: Returns 0 if permission is granted.
*/
int security_binder_transfer_binder(const struct cred *from,
const struct cred *to)
{
return call_int_hook(binder_transfer_binder, 0, from, to);
}
/**
* security_binder_transfer_file() - Check if a binder file xfer is allowed
* @from: sending process
* @to: receiving process
* @file: file being transferred
*
* Check whether @from is allowed to transfer @file to @to.
*
* Return: Returns 0 if permission is granted.
*/
int security_binder_transfer_file(const struct cred *from,
const struct cred *to, const struct file *file)
{
return call_int_hook(binder_transfer_file, 0, from, to, file);
}
/**
* security_ptrace_access_check() - Check if tracing is allowed
* @child: target process
* @mode: PTRACE_MODE flags
*
* Check permission before allowing the current process to trace the @child
 * process. Security modules may also want to perform a process tracing check
 * during an execve, in the credential-setting hooks of the binprm security
 * operations, if the process is being traced and its security attributes
 * would be changed by the execve.
*
* Return: Returns 0 if permission is granted.
*/
int security_ptrace_access_check(struct task_struct *child, unsigned int mode)
{
return call_int_hook(ptrace_access_check, 0, child, mode);
}
/**
* security_ptrace_traceme() - Check if tracing is allowed
* @parent: tracing process
*
* Check that the @parent process has sufficient permission to trace the
* current process before allowing the current process to present itself to the
* @parent process for tracing.
*
* Return: Returns 0 if permission is granted.
*/
int security_ptrace_traceme(struct task_struct *parent)
{
return call_int_hook(ptrace_traceme, 0, parent);
}
/**
* security_capget() - Get the capability sets for a process
* @target: target process
* @effective: effective capability set
* @inheritable: inheritable capability set
* @permitted: permitted capability set
*
* Get the @effective, @inheritable, and @permitted capability sets for the
* @target process. The hook may also perform permission checking to determine
* if the current process is allowed to see the capability sets of the @target
* process.
*
* Return: Returns 0 if the capability sets were successfully obtained.
*/
int security_capget(const struct task_struct *target,
kernel_cap_t *effective,
kernel_cap_t *inheritable,
kernel_cap_t *permitted)
{
return call_int_hook(capget, 0, target,
effective, inheritable, permitted);
}
/**
* security_capset() - Set the capability sets for a process
* @new: new credentials for the target process
* @old: current credentials of the target process
* @effective: effective capability set
* @inheritable: inheritable capability set
* @permitted: permitted capability set
*
* Set the @effective, @inheritable, and @permitted capability sets for the
* current process.
*
* Return: Returns 0 and update @new if permission is granted.
*/
int security_capset(struct cred *new, const struct cred *old,
const kernel_cap_t *effective,
const kernel_cap_t *inheritable,
const kernel_cap_t *permitted)
{
return call_int_hook(capset, 0, new, old,
effective, inheritable, permitted);
}
/**
* security_capable() - Check if a process has the necessary capability
* @cred: credentials to examine
* @ns: user namespace
* @cap: capability requested
* @opts: capability check options
*
 * Check whether the process identified by @cred has the @cap capability in the
 * @ns user namespace. @cap contains the capability <include/linux/capability.h>.
* @opts contains options for the capable check <include/linux/security.h>.
*
* Return: Returns 0 if the capability is granted.
*/
int security_capable(const struct cred *cred,
struct user_namespace *ns,
int cap,
unsigned int opts)
{
return call_int_hook(capable, 0, cred, ns, cap, opts);
}
/**
* security_quotactl() - Check if a quotactl() syscall is allowed for this fs
* @cmds: commands
* @type: type
* @id: id
* @sb: filesystem
*
* Check whether the quotactl syscall is allowed for this @sb.
*
* Return: Returns 0 if permission is granted.
*/
int security_quotactl(int cmds, int type, int id, struct super_block *sb)
{
return call_int_hook(quotactl, 0, cmds, type, id, sb);
}
/**
* security_quota_on() - Check if QUOTAON is allowed for a dentry
* @dentry: dentry
*
* Check whether QUOTAON is allowed for @dentry.
*
* Return: Returns 0 if permission is granted.
*/
int security_quota_on(struct dentry *dentry)
{
return call_int_hook(quota_on, 0, dentry);
}
/**
* security_syslog() - Check if accessing the kernel message ring is allowed
* @type: SYSLOG_ACTION_* type
*
* Check permission before accessing the kernel message ring or changing
* logging to the console. See the syslog(2) manual page for an explanation of
* the @type values.
*
* Return: Return 0 if permission is granted.
*/
int security_syslog(int type)
{
return call_int_hook(syslog, 0, type);
}
/**
* security_settime64() - Check if changing the system time is allowed
* @ts: new time
* @tz: timezone
*
* Check permission to change the system time, struct timespec64 is defined in
* <include/linux/time64.h> and timezone is defined in <include/linux/time.h>.
*
* Return: Returns 0 if permission is granted.
*/
int security_settime64(const struct timespec64 *ts, const struct timezone *tz)
{
return call_int_hook(settime, 0, ts, tz);
}
/**
* security_vm_enough_memory_mm() - Check if allocating a new mem map is allowed
* @mm: mm struct
* @pages: number of pages
*
* Check permissions for allocating a new virtual mapping. If all LSMs return
* a positive value, __vm_enough_memory() will be called with cap_sys_admin
* set. If at least one LSM returns 0 or negative, __vm_enough_memory() will be
* called with cap_sys_admin cleared.
*
* Return: Returns 0 if permission is granted by the LSM infrastructure to the
* caller.
*/
int security_vm_enough_memory_mm(struct mm_struct *mm, long pages)
{
struct security_hook_list *hp;
int cap_sys_admin = 1;
int rc;
/*
* The module will respond with a positive value if
* it thinks the __vm_enough_memory() call should be
* made with the cap_sys_admin set. If all of the modules
* agree that it should be set it will. If any module
* thinks it should not be set it won't.
*/
hlist_for_each_entry(hp, &security_hook_heads.vm_enough_memory, list) {
rc = hp->hook.vm_enough_memory(mm, pages);
if (rc <= 0) {
cap_sys_admin = 0;
break;
}
}
return __vm_enough_memory(mm, pages, cap_sys_admin);
}
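/*
 * Sketch of the contract described above, seen from an LSM's hook (the
 * function name is hypothetical and the body only illustrates the
 * return-value convention):
 *
 *	static int example_vm_enough_memory(struct mm_struct *mm, long pages)
 *	{
 *		if (capable(CAP_SYS_ADMIN))
 *			return 1;	(leave cap_sys_admin set)
 *		return 0;		(force the conservative check)
 *	}
 *
 * A single zero or negative return from any module clears cap_sys_admin for
 * the __vm_enough_memory() call.
 */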
/**
* security_bprm_creds_for_exec() - Prepare the credentials for exec()
* @bprm: binary program information
*
* If the setup in prepare_exec_creds did not setup @bprm->cred->security
* properly for executing @bprm->file, update the LSM's portion of
* @bprm->cred->security to be what commit_creds needs to install for the new
* program. This hook may also optionally check permissions (e.g. for
* transitions between security domains). The hook must set @bprm->secureexec
 * to 1 if AT_SECURE should be set to request that libc enable secure mode. @bprm
* contains the linux_binprm structure.
*
* Return: Returns 0 if the hook is successful and permission is granted.
*/
int security_bprm_creds_for_exec(struct linux_binprm *bprm)
{
return call_int_hook(bprm_creds_for_exec, 0, bprm);
}
/**
* security_bprm_creds_from_file() - Update linux_binprm creds based on file
* @bprm: binary program information
* @file: associated file
*
* If @file is setpcap, suid, sgid or otherwise marked to change privilege upon
* exec, update @bprm->cred to reflect that change. This is called after
* finding the binary that will be executed without an interpreter. This
* ensures that the credentials will not be derived from a script that the
 * binary will need to reopen, which when reopened may end up being a completely
* different file. This hook may also optionally check permissions (e.g. for
* transitions between security domains). The hook must set @bprm->secureexec
 * to 1 if AT_SECURE should be set to request that libc enable secure mode. The
* hook must add to @bprm->per_clear any personality flags that should be
* cleared from current->personality. @bprm contains the linux_binprm
* structure.
*
* Return: Returns 0 if the hook is successful and permission is granted.
*/
int security_bprm_creds_from_file(struct linux_binprm *bprm, struct file *file)
{
return call_int_hook(bprm_creds_from_file, 0, bprm, file);
}
/**
* security_bprm_check() - Mediate binary handler search
* @bprm: binary program information
*
* This hook mediates the point when a search for a binary handler will begin.
* It allows a check against the @bprm->cred->security value which was set in
* the preceding creds_for_exec call. The argv list and envp list are reliably
* available in @bprm. This hook may be called multiple times during a single
* execve. @bprm contains the linux_binprm structure.
*
* Return: Returns 0 if the hook is successful and permission is granted.
*/
int security_bprm_check(struct linux_binprm *bprm)
{
int ret;
ret = call_int_hook(bprm_check_security, 0, bprm);
if (ret)
return ret;
return ima_bprm_check(bprm);
}
/**
* security_bprm_committing_creds() - Install creds for a process during exec()
* @bprm: binary program information
*
* Prepare to install the new security attributes of a process being
* transformed by an execve operation, based on the old credentials pointed to
* by @current->cred and the information set in @bprm->cred by the
* bprm_creds_for_exec hook. @bprm points to the linux_binprm structure. This
* hook is a good place to perform state changes on the process such as closing
* open file descriptors to which access will no longer be granted when the
* attributes are changed. This is called immediately before commit_creds().
*/
void security_bprm_committing_creds(struct linux_binprm *bprm)
{
call_void_hook(bprm_committing_creds, bprm);
}
/**
* security_bprm_committed_creds() - Tidy up after cred install during exec()
* @bprm: binary program information
*
* Tidy up after the installation of the new security attributes of a process
* being transformed by an execve operation. The new credentials have, by this
* point, been set to @current->cred. @bprm points to the linux_binprm
* structure. This hook is a good place to perform state changes on the
* process such as clearing out non-inheritable signal state. This is called
* immediately after commit_creds().
*/
void security_bprm_committed_creds(struct linux_binprm *bprm)
{
call_void_hook(bprm_committed_creds, bprm);
}
/**
* security_fs_context_submount() - Initialise fc->security
* @fc: new filesystem context
* @reference: dentry reference for submount/remount
*
* Fill out the ->security field for a new fs_context.
*
* Return: Returns 0 on success or negative error code on failure.
*/
int security_fs_context_submount(struct fs_context *fc, struct super_block *reference)
{
return call_int_hook(fs_context_submount, 0, fc, reference);
}
/**
* security_fs_context_dup() - Duplicate a fs_context LSM blob
* @fc: destination filesystem context
* @src_fc: source filesystem context
*
 * Allocate and attach a security structure to fc->security. This pointer is
* initialised to NULL by the caller. @fc indicates the new filesystem context.
* @src_fc indicates the original filesystem context.
*
* Return: Returns 0 on success or a negative error code on failure.
*/
int security_fs_context_dup(struct fs_context *fc, struct fs_context *src_fc)
{
return call_int_hook(fs_context_dup, 0, fc, src_fc);
}
/**
* security_fs_context_parse_param() - Configure a filesystem context
* @fc: filesystem context
* @param: filesystem parameter
*
* Userspace provided a parameter to configure a superblock. The LSM can
* consume the parameter or return it to the caller for use elsewhere.
*
 * Return: Returns 0 if the parameter is consumed by the LSM, -ENOPARAM if it
 * is returned to the caller for use elsewhere, or a negative error code
 * otherwise.
*/
int security_fs_context_parse_param(struct fs_context *fc,
struct fs_parameter *param)
{
struct security_hook_list *hp;
int trc;
int rc = -ENOPARAM;
hlist_for_each_entry(hp, &security_hook_heads.fs_context_parse_param,
list) {
trc = hp->hook.fs_context_parse_param(fc, param);
if (trc == 0)
rc = 0;
else if (trc != -ENOPARAM)
return trc;
}
return rc;
}
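/*
 * Illustrative hook behaviour for the loop above (the LSM and its mount
 * option name are hypothetical):
 *
 *	static int example_fs_context_parse_param(struct fs_context *fc,
 *						  struct fs_parameter *param)
 *	{
 *		if (strcmp(param->key, "examplecontext") != 0)
 *			return -ENOPARAM;	(not ours, hand it back)
 *		return 0;			(parameter consumed)
 *	}
 *
 * Returning -ENOPARAM from every LSM leaves the parameter for the filesystem
 * itself; any other error aborts the mount.
 */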
/**
* security_sb_alloc() - Allocate a super_block LSM blob
* @sb: filesystem superblock
*
* Allocate and attach a security structure to the sb->s_security field. The
* s_security field is initialized to NULL when the structure is allocated.
* @sb contains the super_block structure to be modified.
*
* Return: Returns 0 if operation was successful.
*/
int security_sb_alloc(struct super_block *sb)
{
int rc = lsm_superblock_alloc(sb);
if (unlikely(rc))
return rc;
rc = call_int_hook(sb_alloc_security, 0, sb);
if (unlikely(rc))
security_sb_free(sb);
return rc;
}
/**
* security_sb_delete() - Release super_block LSM associated objects
* @sb: filesystem superblock
*
* Release objects tied to a superblock (e.g. inodes). @sb contains the
* super_block structure being released.
*/
void security_sb_delete(struct super_block *sb)
{
call_void_hook(sb_delete, sb);
}
/**
* security_sb_free() - Free a super_block LSM blob
* @sb: filesystem superblock
*
* Deallocate and clear the sb->s_security field. @sb contains the super_block
* structure to be modified.
*/
void security_sb_free(struct super_block *sb)
{
call_void_hook(sb_free_security, sb);
kfree(sb->s_security);
sb->s_security = NULL;
}
/**
* security_free_mnt_opts() - Free memory associated with mount options
* @mnt_opts: LSM processed mount options
*
 * Free memory associated with @mnt_opts.
*/
void security_free_mnt_opts(void **mnt_opts)
{
if (!*mnt_opts)
return;
call_void_hook(sb_free_mnt_opts, *mnt_opts);
*mnt_opts = NULL;
}
EXPORT_SYMBOL(security_free_mnt_opts);
/**
* security_sb_eat_lsm_opts() - Consume LSM mount options
* @options: mount options
* @mnt_opts: LSM processed mount options
*
* Eat (scan @options) and save them in @mnt_opts.
*
* Return: Returns 0 on success, negative values on failure.
*/
int security_sb_eat_lsm_opts(char *options, void **mnt_opts)
{
return call_int_hook(sb_eat_lsm_opts, 0, options, mnt_opts);
}
EXPORT_SYMBOL(security_sb_eat_lsm_opts);
/**
* security_sb_mnt_opts_compat() - Check if new mount options are allowed
* @sb: filesystem superblock
* @mnt_opts: new mount options
*
* Determine if the new mount options in @mnt_opts are allowed given the
* existing mounted filesystem at @sb. @sb superblock being compared.
*
* Return: Returns 0 if options are compatible.
*/
int security_sb_mnt_opts_compat(struct super_block *sb,
void *mnt_opts)
{
return call_int_hook(sb_mnt_opts_compat, 0, sb, mnt_opts);
}
EXPORT_SYMBOL(security_sb_mnt_opts_compat);
/**
* security_sb_remount() - Verify no incompatible mount changes during remount
* @sb: filesystem superblock
* @mnt_opts: (re)mount options
*
* Extracts security system specific mount options and verifies no changes are
* being made to those options.
*
* Return: Returns 0 if permission is granted.
*/
int security_sb_remount(struct super_block *sb,
void *mnt_opts)
{
return call_int_hook(sb_remount, 0, sb, mnt_opts);
}
EXPORT_SYMBOL(security_sb_remount);
/**
* security_sb_kern_mount() - Check if a kernel mount is allowed
* @sb: filesystem superblock
*
* Mount this @sb if allowed by permissions.
*
* Return: Returns 0 if permission is granted.
*/
int security_sb_kern_mount(struct super_block *sb)
{
return call_int_hook(sb_kern_mount, 0, sb);
}
/**
* security_sb_show_options() - Output the mount options for a superblock
* @m: output file
* @sb: filesystem superblock
*
* Show (print on @m) mount options for this @sb.
*
* Return: Returns 0 on success, negative values on failure.
*/
int security_sb_show_options(struct seq_file *m, struct super_block *sb)
{
return call_int_hook(sb_show_options, 0, m, sb);
}
/**
* security_sb_statfs() - Check if accessing fs stats is allowed
* @dentry: superblock handle
*
* Check permission before obtaining filesystem statistics for the @mnt
* mountpoint. @dentry is a handle on the superblock for the filesystem.
*
* Return: Returns 0 if permission is granted.
*/
int security_sb_statfs(struct dentry *dentry)
{
return call_int_hook(sb_statfs, 0, dentry);
}
/**
* security_sb_mount() - Check permission for mounting a filesystem
* @dev_name: filesystem backing device
* @path: mount point
* @type: filesystem type
* @flags: mount flags
* @data: filesystem specific data
*
* Check permission before an object specified by @dev_name is mounted on the
* mount point named by @nd. For an ordinary mount, @dev_name identifies a
* device if the file system type requires a device. For a remount
* (@flags & MS_REMOUNT), @dev_name is irrelevant. For a loopback/bind mount
* (@flags & MS_BIND), @dev_name identifies the pathname of the object being
* mounted.
*
* Return: Returns 0 if permission is granted.
*/
int security_sb_mount(const char *dev_name, const struct path *path,
const char *type, unsigned long flags, void *data)
{
return call_int_hook(sb_mount, 0, dev_name, path, type, flags, data);
}
/**
* security_sb_umount() - Check permission for unmounting a filesystem
* @mnt: mounted filesystem
* @flags: unmount flags
*
* Check permission before the @mnt file system is unmounted.
*
* Return: Returns 0 if permission is granted.
*/
int security_sb_umount(struct vfsmount *mnt, int flags)
{
return call_int_hook(sb_umount, 0, mnt, flags);
}
/**
* security_sb_pivotroot() - Check permissions for pivoting the rootfs
* @old_path: new location for current rootfs
* @new_path: location of the new rootfs
*
* Check permission before pivoting the root filesystem.
*
* Return: Returns 0 if permission is granted.
*/
int security_sb_pivotroot(const struct path *old_path,
const struct path *new_path)
{
return call_int_hook(sb_pivotroot, 0, old_path, new_path);
}
/**
* security_sb_set_mnt_opts() - Set the mount options for a filesystem
* @sb: filesystem superblock
* @mnt_opts: binary mount options
* @kern_flags: kernel flags (in)
* @set_kern_flags: kernel flags (out)
*
* Set the security relevant mount options used for a superblock.
*
* Return: Returns 0 on success, error on failure.
*/
int security_sb_set_mnt_opts(struct super_block *sb,
void *mnt_opts,
unsigned long kern_flags,
unsigned long *set_kern_flags)
{
return call_int_hook(sb_set_mnt_opts,
mnt_opts ? -EOPNOTSUPP : 0, sb,
mnt_opts, kern_flags, set_kern_flags);
}
EXPORT_SYMBOL(security_sb_set_mnt_opts);
/**
* security_sb_clone_mnt_opts() - Duplicate superblock mount options
* @oldsb: source superblock
* @newsb: destination superblock
* @kern_flags: kernel flags (in)
* @set_kern_flags: kernel flags (out)
*
* Copy all security options from a given superblock to another.
*
* Return: Returns 0 on success, error on failure.
*/
int security_sb_clone_mnt_opts(const struct super_block *oldsb,
struct super_block *newsb,
unsigned long kern_flags,
unsigned long *set_kern_flags)
{
return call_int_hook(sb_clone_mnt_opts, 0, oldsb, newsb,
kern_flags, set_kern_flags);
}
EXPORT_SYMBOL(security_sb_clone_mnt_opts);
/**
* security_move_mount() - Check permissions for moving a mount
* @from_path: source mount point
* @to_path: destination mount point
*
* Check permission before a mount is moved.
*
* Return: Returns 0 if permission is granted.
*/
int security_move_mount(const struct path *from_path,
const struct path *to_path)
{
return call_int_hook(move_mount, 0, from_path, to_path);
}
/**
* security_path_notify() - Check if setting a watch is allowed
* @path: file path
* @mask: event mask
* @obj_type: file path type
*
* Check permissions before setting a watch on events as defined by @mask, on
* an object at @path, whose type is defined by @obj_type.
*
* Return: Returns 0 if permission is granted.
*/
int security_path_notify(const struct path *path, u64 mask,
unsigned int obj_type)
{
return call_int_hook(path_notify, 0, path, mask, obj_type);
}
/**
* security_inode_alloc() - Allocate an inode LSM blob
* @inode: the inode
*
* Allocate and attach a security structure to @inode->i_security. The
* i_security field is initialized to NULL when the inode structure is
* allocated.
*
* Return: Return 0 if operation was successful.
*/
int security_inode_alloc(struct inode *inode)
{
int rc = lsm_inode_alloc(inode);
if (unlikely(rc))
return rc;
rc = call_int_hook(inode_alloc_security, 0, inode);
if (unlikely(rc))
security_inode_free(inode);
return rc;
}
static void inode_free_by_rcu(struct rcu_head *head)
{
/*
* The rcu head is at the start of the inode blob
*/
kmem_cache_free(lsm_inode_cache, head);
}
/**
* security_inode_free() - Free an inode's LSM blob
* @inode: the inode
*
* Deallocate the inode security structure and set @inode->i_security to NULL.
*/
void security_inode_free(struct inode *inode)
{
integrity_inode_free(inode);
call_void_hook(inode_free_security, inode);
/*
* The inode may still be referenced in a path walk and
* a call to security_inode_permission() can be made
* after inode_free_security() is called. Ideally, the VFS
* wouldn't do this, but fixing that is a much harder
* job. For now, simply free the i_security via RCU, and
* leave the current inode->i_security pointer intact.
* The inode will be freed after the RCU grace period too.
*/
if (inode->i_security)
call_rcu((struct rcu_head *)inode->i_security,
inode_free_by_rcu);
}
/**
* security_dentry_init_security() - Perform dentry initialization
* @dentry: the dentry to initialize
* @mode: mode used to determine resource type
* @name: name of the last path component
* @xattr_name: name of the security/LSM xattr
* @ctx: pointer to the resulting LSM context
* @ctxlen: length of @ctx
*
* Compute a context for a dentry as the inode is not yet available since NFSv4
* has no label backed by an EA anyway. It is important to note that
 * @xattr_name does not need to be freed by the caller, it is a static string.
*
* Return: Returns 0 on success, negative values on failure.
*/
int security_dentry_init_security(struct dentry *dentry, int mode,
const struct qstr *name,
const char **xattr_name, void **ctx,
u32 *ctxlen)
{
struct security_hook_list *hp;
int rc;
/*
* Only one module will provide a security context.
*/
hlist_for_each_entry(hp, &security_hook_heads.dentry_init_security,
list) {
rc = hp->hook.dentry_init_security(dentry, mode, name,
xattr_name, ctx, ctxlen);
if (rc != LSM_RET_DEFAULT(dentry_init_security))
return rc;
}
return LSM_RET_DEFAULT(dentry_init_security);
}
EXPORT_SYMBOL(security_dentry_init_security);
/**
* security_dentry_create_files_as() - Perform dentry initialization
* @dentry: the dentry to initialize
* @mode: mode used to determine resource type
* @name: name of the last path component
* @old: creds to use for LSM context calculations
* @new: creds to modify
*
* Compute a context for a dentry as the inode is not yet available and set
* that context in passed in creds so that new files are created using that
* context. Context is calculated using the passed in creds and not the creds
* of the caller.
*
* Return: Returns 0 on success, error on failure.
*/
int security_dentry_create_files_as(struct dentry *dentry, int mode,
struct qstr *name,
const struct cred *old, struct cred *new)
{
return call_int_hook(dentry_create_files_as, 0, dentry, mode,
name, old, new);
}
EXPORT_SYMBOL(security_dentry_create_files_as);
/**
* security_inode_init_security() - Initialize an inode's LSM context
* @inode: the inode
* @dir: parent directory
* @qstr: last component of the pathname
* @initxattrs: callback function to write xattrs
* @fs_data: filesystem specific data
*
* Obtain the security attribute name suffix and value to set on a newly
* created inode and set up the incore security field for the new inode. This
* hook is called by the fs code as part of the inode creation transaction and
* provides for atomic labeling of the inode, unlike the post_create/mkdir/...
* hooks called by the VFS.
*
* The hook function is expected to populate the xattrs array, by calling
* lsm_get_xattr_slot() to retrieve the slots reserved by the security module
* with the lbs_xattr_count field of the lsm_blob_sizes structure. For each
* slot, the hook function should set ->name to the attribute name suffix
 * (e.g. selinux), allocate ->value (it will be freed by the caller) and set it
 * to the attribute value, and set ->value_len to the length of the value. If
* the security module does not use security attributes or does not wish to put
* a security attribute on this particular inode, then it should return
* -EOPNOTSUPP to skip this processing.
*
* Return: Returns 0 if the LSM successfully initialized all of the inode
* security attributes that are required, negative values otherwise.
*/
int security_inode_init_security(struct inode *inode, struct inode *dir,
const struct qstr *qstr,
const initxattrs initxattrs, void *fs_data)
{
struct security_hook_list *hp;
struct xattr *new_xattrs = NULL;
int ret = -EOPNOTSUPP, xattr_count = 0;
if (unlikely(IS_PRIVATE(inode)))
return 0;
if (!blob_sizes.lbs_xattr_count)
return 0;
if (initxattrs) {
/* Allocate +1 for EVM and +1 as terminator. */
new_xattrs = kcalloc(blob_sizes.lbs_xattr_count + 2,
sizeof(*new_xattrs), GFP_NOFS);
if (!new_xattrs)
return -ENOMEM;
}
hlist_for_each_entry(hp, &security_hook_heads.inode_init_security,
list) {
ret = hp->hook.inode_init_security(inode, dir, qstr, new_xattrs,
&xattr_count);
if (ret && ret != -EOPNOTSUPP)
goto out;
/*
* As documented in lsm_hooks.h, -EOPNOTSUPP in this context
* means that the LSM is not willing to provide an xattr, not
* that it wants to signal an error. Thus, continue to invoke
* the remaining LSMs.
*/
}
/* If initxattrs() is NULL, xattr_count is zero, skip the call. */
if (!xattr_count)
goto out;
ret = evm_inode_init_security(inode, dir, qstr, new_xattrs,
&xattr_count);
if (ret)
goto out;
ret = initxattrs(inode, new_xattrs, fs_data);
out:
for (; xattr_count > 0; xattr_count--)
kfree(new_xattrs[xattr_count - 1].value);
kfree(new_xattrs);
return (ret == -EOPNOTSUPP) ? 0 : ret;
}
EXPORT_SYMBOL(security_inode_init_security);
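/*
 * Sketch of the xattr-slot protocol documented above, from the point of view
 * of an LSM's inode_init_security hook. lsm_get_xattr_slot() is the helper
 * from <linux/lsm_hooks.h>; the example_* names and the label value are
 * hypothetical:
 *
 *	static int example_inode_init_security(struct inode *inode,
 *					       struct inode *dir,
 *					       const struct qstr *qstr,
 *					       struct xattr *xattrs,
 *					       int *xattr_count)
 *	{
 *		struct xattr *xattr = lsm_get_xattr_slot(xattrs, xattr_count);
 *
 *		if (!xattr)
 *			return 0;
 *		xattr->value = kstrdup("example_label", GFP_NOFS);
 *		if (!xattr->value)
 *			return -ENOMEM;
 *		xattr->value_len = strlen(xattr->value) + 1;
 *		xattr->name = "example";	(suffix after "security.")
 *		return 0;
 *	}
 */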
/**
* security_inode_init_security_anon() - Initialize an anonymous inode
* @inode: the inode
* @name: the anonymous inode class
* @context_inode: an optional related inode
*
* Set up the incore security field for the new anonymous inode and return
* whether the inode creation is permitted by the security module or not.
*
* Return: Returns 0 on success, -EACCES if the security module denies the
* creation of this inode, or another -errno upon other errors.
*/
int security_inode_init_security_anon(struct inode *inode,
const struct qstr *name,
const struct inode *context_inode)
{
return call_int_hook(inode_init_security_anon, 0, inode, name,
context_inode);
}
#ifdef CONFIG_SECURITY_PATH
/**
* security_path_mknod() - Check if creating a special file is allowed
* @dir: parent directory
* @dentry: new file
* @mode: new file mode
* @dev: device number
*
* Check permissions when creating a file. Note that this hook is called even
 * if the mknod operation is being done for a regular file.
*
* Return: Returns 0 if permission is granted.
*/
int security_path_mknod(const struct path *dir, struct dentry *dentry,
umode_t mode, unsigned int dev)
{
if (unlikely(IS_PRIVATE(d_backing_inode(dir->dentry))))
return 0;
return call_int_hook(path_mknod, 0, dir, dentry, mode, dev);
}
EXPORT_SYMBOL(security_path_mknod);
/**
* security_path_mkdir() - Check if creating a new directory is allowed
* @dir: parent directory
* @dentry: new directory
* @mode: new directory mode
*
* Check permissions to create a new directory in the existing directory.
*
* Return: Returns 0 if permission is granted.
*/
int security_path_mkdir(const struct path *dir, struct dentry *dentry,
umode_t mode)
{
if (unlikely(IS_PRIVATE(d_backing_inode(dir->dentry))))
return 0;
return call_int_hook(path_mkdir, 0, dir, dentry, mode);
}
EXPORT_SYMBOL(security_path_mkdir);
/**
* security_path_rmdir() - Check if removing a directory is allowed
* @dir: parent directory
* @dentry: directory to remove
*
* Check the permission to remove a directory.
*
* Return: Returns 0 if permission is granted.
*/
int security_path_rmdir(const struct path *dir, struct dentry *dentry)
{
if (unlikely(IS_PRIVATE(d_backing_inode(dir->dentry))))
return 0;
return call_int_hook(path_rmdir, 0, dir, dentry);
}
/**
* security_path_unlink() - Check if removing a hard link is allowed
* @dir: parent directory
* @dentry: file
*
* Check the permission to remove a hard link to a file.
*
* Return: Returns 0 if permission is granted.
*/
int security_path_unlink(const struct path *dir, struct dentry *dentry)
{
if (unlikely(IS_PRIVATE(d_backing_inode(dir->dentry))))
return 0;
return call_int_hook(path_unlink, 0, dir, dentry);
}
EXPORT_SYMBOL(security_path_unlink);
/**
* security_path_symlink() - Check if creating a symbolic link is allowed
* @dir: parent directory
* @dentry: symbolic link
* @old_name: file pathname
*
* Check the permission to create a symbolic link to a file.
*
* Return: Returns 0 if permission is granted.
*/
int security_path_symlink(const struct path *dir, struct dentry *dentry,
const char *old_name)
{
if (unlikely(IS_PRIVATE(d_backing_inode(dir->dentry))))
return 0;
return call_int_hook(path_symlink, 0, dir, dentry, old_name);
}
/**
* security_path_link - Check if creating a hard link is allowed
* @old_dentry: existing file
* @new_dir: new parent directory
* @new_dentry: new link
*
* Check permission before creating a new hard link to a file.
*
* Return: Returns 0 if permission is granted.
*/
int security_path_link(struct dentry *old_dentry, const struct path *new_dir,
struct dentry *new_dentry)
{
if (unlikely(IS_PRIVATE(d_backing_inode(old_dentry))))
return 0;
return call_int_hook(path_link, 0, old_dentry, new_dir, new_dentry);
}
/**
* security_path_rename() - Check if renaming a file is allowed
* @old_dir: parent directory of the old file
* @old_dentry: the old file
* @new_dir: parent directory of the new file
* @new_dentry: the new file
* @flags: flags
*
* Check for permission to rename a file or directory.
*
* Return: Returns 0 if permission is granted.
*/
int security_path_rename(const struct path *old_dir, struct dentry *old_dentry,
const struct path *new_dir, struct dentry *new_dentry,
unsigned int flags)
{
if (unlikely(IS_PRIVATE(d_backing_inode(old_dentry)) ||
(d_is_positive(new_dentry) &&
IS_PRIVATE(d_backing_inode(new_dentry)))))
return 0;
return call_int_hook(path_rename, 0, old_dir, old_dentry, new_dir,
new_dentry, flags);
}
EXPORT_SYMBOL(security_path_rename);
/**
* security_path_truncate() - Check if truncating a file is allowed
* @path: file
*
* Check permission before truncating the file indicated by path. Note that
* truncation permissions may also be checked based on already opened files,
* using the security_file_truncate() hook.
*
* Return: Returns 0 if permission is granted.
*/
int security_path_truncate(const struct path *path)
{
if (unlikely(IS_PRIVATE(d_backing_inode(path->dentry))))
return 0;
return call_int_hook(path_truncate, 0, path);
}
/**
* security_path_chmod() - Check if changing the file's mode is allowed
* @path: file
* @mode: new mode
*
* Check for permission to change a mode of the file @path. The new mode is
* specified in @mode which is a bitmask of constants from
* <include/uapi/linux/stat.h>.
*
* Return: Returns 0 if permission is granted.
*/
int security_path_chmod(const struct path *path, umode_t mode)
{
if (unlikely(IS_PRIVATE(d_backing_inode(path->dentry))))
return 0;
return call_int_hook(path_chmod, 0, path, mode);
}
/**
* security_path_chown() - Check if changing the file's owner/group is allowed
* @path: file
* @uid: file owner
* @gid: file group
*
* Check for permission to change owner/group of a file or directory.
*
* Return: Returns 0 if permission is granted.
*/
int security_path_chown(const struct path *path, kuid_t uid, kgid_t gid)
{
if (unlikely(IS_PRIVATE(d_backing_inode(path->dentry))))
return 0;
return call_int_hook(path_chown, 0, path, uid, gid);
}
/**
* security_path_chroot() - Check if changing the root directory is allowed
* @path: directory
*
* Check for permission to change root directory.
*
* Return: Returns 0 if permission is granted.
*/
int security_path_chroot(const struct path *path)
{
return call_int_hook(path_chroot, 0, path);
}
#endif /* CONFIG_SECURITY_PATH */
/**
* security_inode_create() - Check if creating a file is allowed
* @dir: the parent directory
* @dentry: the file being created
* @mode: requested file mode
*
* Check permission to create a regular file.
*
* Return: Returns 0 if permission is granted.
*/
int security_inode_create(struct inode *dir, struct dentry *dentry,
umode_t mode)
{
if (unlikely(IS_PRIVATE(dir)))
return 0;
return call_int_hook(inode_create, 0, dir, dentry, mode);
}
EXPORT_SYMBOL_GPL(security_inode_create);
/**
* security_inode_link() - Check if creating a hard link is allowed
* @old_dentry: existing file
* @dir: new parent directory
* @new_dentry: new link
*
* Check permission before creating a new hard link to a file.
*
* Return: Returns 0 if permission is granted.
*/
int security_inode_link(struct dentry *old_dentry, struct inode *dir,
struct dentry *new_dentry)
{
if (unlikely(IS_PRIVATE(d_backing_inode(old_dentry))))
return 0;
return call_int_hook(inode_link, 0, old_dentry, dir, new_dentry);
}
/**
* security_inode_unlink() - Check if removing a hard link is allowed
* @dir: parent directory
* @dentry: file
*
* Check the permission to remove a hard link to a file.
*
* Return: Returns 0 if permission is granted.
*/
int security_inode_unlink(struct inode *dir, struct dentry *dentry)
{
if (unlikely(IS_PRIVATE(d_backing_inode(dentry))))
return 0;
return call_int_hook(inode_unlink, 0, dir, dentry);
}
/**
* security_inode_symlink() - Check if creating a symbolic link is allowed
* @dir: parent directory
* @dentry: symbolic link
* @old_name: existing filename
*
* Check the permission to create a symbolic link to a file.
*
* Return: Returns 0 if permission is granted.
*/
int security_inode_symlink(struct inode *dir, struct dentry *dentry,
const char *old_name)
{
if (unlikely(IS_PRIVATE(dir)))
return 0;
return call_int_hook(inode_symlink, 0, dir, dentry, old_name);
}
/**
 * security_inode_mkdir() - Check if creating a new directory is allowed
* @dir: parent directory
* @dentry: new directory
* @mode: new directory mode
*
* Check permissions to create a new directory in the existing directory
* associated with inode structure @dir.
*
* Return: Returns 0 if permission is granted.
*/
int security_inode_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
{
if (unlikely(IS_PRIVATE(dir)))
return 0;
return call_int_hook(inode_mkdir, 0, dir, dentry, mode);
}
EXPORT_SYMBOL_GPL(security_inode_mkdir);
/**
* security_inode_rmdir() - Check if removing a directory is allowed
* @dir: parent directory
* @dentry: directory to be removed
*
* Check the permission to remove a directory.
*
* Return: Returns 0 if permission is granted.
*/
int security_inode_rmdir(struct inode *dir, struct dentry *dentry)
{
if (unlikely(IS_PRIVATE(d_backing_inode(dentry))))
return 0;
return call_int_hook(inode_rmdir, 0, dir, dentry);
}
/**
* security_inode_mknod() - Check if creating a special file is allowed
* @dir: parent directory
* @dentry: new file
* @mode: new file mode
* @dev: device number
*
* Check permissions when creating a special file (or a socket or a fifo file
 * created via the mknod system call). Note that if the mknod operation is being
* done for a regular file, then the create hook will be called and not this
* hook.
*
* Return: Returns 0 if permission is granted.
*/
int security_inode_mknod(struct inode *dir, struct dentry *dentry,
umode_t mode, dev_t dev)
{
if (unlikely(IS_PRIVATE(dir)))
return 0;
return call_int_hook(inode_mknod, 0, dir, dentry, mode, dev);
}
/**
* security_inode_rename() - Check if renaming a file is allowed
* @old_dir: parent directory of the old file
* @old_dentry: the old file
* @new_dir: parent directory of the new file
* @new_dentry: the new file
* @flags: flags
*
* Check for permission to rename a file or directory.
*
* Return: Returns 0 if permission is granted.
*/
int security_inode_rename(struct inode *old_dir, struct dentry *old_dentry,
struct inode *new_dir, struct dentry *new_dentry,
unsigned int flags)
{
if (unlikely(IS_PRIVATE(d_backing_inode(old_dentry)) ||
(d_is_positive(new_dentry) &&
IS_PRIVATE(d_backing_inode(new_dentry)))))
return 0;
if (flags & RENAME_EXCHANGE) {
int err = call_int_hook(inode_rename, 0, new_dir, new_dentry,
old_dir, old_dentry);
if (err)
return err;
}
return call_int_hook(inode_rename, 0, old_dir, old_dentry,
new_dir, new_dentry);
}
/**
* security_inode_readlink() - Check if reading a symbolic link is allowed
* @dentry: link
*
* Check the permission to read the symbolic link.
*
* Return: Returns 0 if permission is granted.
*/
int security_inode_readlink(struct dentry *dentry)
{
if (unlikely(IS_PRIVATE(d_backing_inode(dentry))))
return 0;
return call_int_hook(inode_readlink, 0, dentry);
}
/**
* security_inode_follow_link() - Check if following a symbolic link is allowed
* @dentry: link dentry
* @inode: link inode
* @rcu: true if in RCU-walk mode
*
* Check permission to follow a symbolic link when looking up a pathname. If
* @rcu is true, @inode is not stable.
*
* Return: Returns 0 if permission is granted.
*/
int security_inode_follow_link(struct dentry *dentry, struct inode *inode,
bool rcu)
{
if (unlikely(IS_PRIVATE(inode)))
return 0;
return call_int_hook(inode_follow_link, 0, dentry, inode, rcu);
}
/**
* security_inode_permission() - Check if accessing an inode is allowed
* @inode: inode
* @mask: access mask
*
* Check permission before accessing an inode. This hook is called by the
* existing Linux permission function, so a security module can use it to
* provide additional checking for existing Linux permission checks. Notice
* that this hook is called when a file is opened (as well as many other
* operations), whereas the file_security_ops permission hook is called when
* the actual read/write operations are performed.
*
* Return: Returns 0 if permission is granted.
*/
int security_inode_permission(struct inode *inode, int mask)
{
if (unlikely(IS_PRIVATE(inode)))
return 0;
return call_int_hook(inode_permission, 0, inode, mask);
}
/**
* security_inode_setattr() - Check if setting file attributes is allowed
* @idmap: idmap of the mount
* @dentry: file
* @attr: new attributes
*
* Check permission before setting file attributes. Note that the kernel call
* to notify_change is performed from several locations, whenever file
* attributes change (such as when a file is truncated, chown/chmod operations,
* transferring disk quotas, etc).
*
* Return: Returns 0 if permission is granted.
*/
int security_inode_setattr(struct mnt_idmap *idmap,
struct dentry *dentry, struct iattr *attr)
{
int ret;
if (unlikely(IS_PRIVATE(d_backing_inode(dentry))))
return 0;
ret = call_int_hook(inode_setattr, 0, dentry, attr);
if (ret)
return ret;
return evm_inode_setattr(idmap, dentry, attr);
}
EXPORT_SYMBOL_GPL(security_inode_setattr);
/**
* security_inode_getattr() - Check if getting file attributes is allowed
* @path: file
*
* Check permission before obtaining file attributes.
*
* Return: Returns 0 if permission is granted.
*/
int security_inode_getattr(const struct path *path)
{
if (unlikely(IS_PRIVATE(d_backing_inode(path->dentry))))
return 0;
return call_int_hook(inode_getattr, 0, path);
}
/**
* security_inode_setxattr() - Check if setting file xattrs is allowed
* @idmap: idmap of the mount
* @dentry: file
* @name: xattr name
* @value: xattr value
* @size: size of xattr value
* @flags: flags
*
* Check permission before setting the extended attributes.
*
* Return: Returns 0 if permission is granted.
*/
int security_inode_setxattr(struct mnt_idmap *idmap,
struct dentry *dentry, const char *name,
const void *value, size_t size, int flags)
{
int ret;
if (unlikely(IS_PRIVATE(d_backing_inode(dentry))))
return 0;
/*
* SELinux and Smack integrate the cap call,
* so assume that all LSMs supplying this call do so.
*/
ret = call_int_hook(inode_setxattr, 1, idmap, dentry, name, value,
size, flags);
if (ret == 1)
ret = cap_inode_setxattr(dentry, name, value, size, flags);
if (ret)
return ret;
ret = ima_inode_setxattr(dentry, name, value, size);
if (ret)
return ret;
return evm_inode_setxattr(idmap, dentry, name, value, size);
}
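/*
 * Note on the unusual default above (a reading aid, not new behaviour): the
 * call_int_hook() walk is seeded with 1 rather than 0, so if no LSM supplies
 * an inode_setxattr hook the result stays 1 and cap_inode_setxattr() is
 * applied as the fallback; an LSM that does supply the hook is assumed to
 * have integrated that capability check itself, as the comment above notes.
 */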
/**
* security_inode_set_acl() - Check if setting posix acls is allowed
* @idmap: idmap of the mount
* @dentry: file
* @acl_name: acl name
* @kacl: acl struct
*
* Check permission before setting posix acls, the posix acls in @kacl are
* identified by @acl_name.
*
* Return: Returns 0 if permission is granted.
*/
int security_inode_set_acl(struct mnt_idmap *idmap,
struct dentry *dentry, const char *acl_name,
struct posix_acl *kacl)
{
int ret;
if (unlikely(IS_PRIVATE(d_backing_inode(dentry))))
return 0;
ret = call_int_hook(inode_set_acl, 0, idmap, dentry, acl_name,
kacl);
if (ret)
return ret;
ret = ima_inode_set_acl(idmap, dentry, acl_name, kacl);
if (ret)
return ret;
return evm_inode_set_acl(idmap, dentry, acl_name, kacl);
}
/**
* security_inode_get_acl() - Check if reading posix acls is allowed
* @idmap: idmap of the mount
* @dentry: file
* @acl_name: acl name
*
 * Check permission before getting posix acls, the posix acls are identified by
* @acl_name.
*
* Return: Returns 0 if permission is granted.
*/
int security_inode_get_acl(struct mnt_idmap *idmap,
struct dentry *dentry, const char *acl_name)
{
if (unlikely(IS_PRIVATE(d_backing_inode(dentry))))
return 0;
return call_int_hook(inode_get_acl, 0, idmap, dentry, acl_name);
}
/**
* security_inode_remove_acl() - Check if removing a posix acl is allowed
* @idmap: idmap of the mount
* @dentry: file
* @acl_name: acl name
*
* Check permission before removing posix acls, the posix acls are identified
* by @acl_name.
*
* Return: Returns 0 if permission is granted.
*/
int security_inode_remove_acl(struct mnt_idmap *idmap,
struct dentry *dentry, const char *acl_name)
{
int ret;
if (unlikely(IS_PRIVATE(d_backing_inode(dentry))))
return 0;
ret = call_int_hook(inode_remove_acl, 0, idmap, dentry, acl_name);
if (ret)
return ret;
ret = ima_inode_remove_acl(idmap, dentry, acl_name);
if (ret)
return ret;
return evm_inode_remove_acl(idmap, dentry, acl_name);
}
/**
* security_inode_post_setxattr() - Update the inode after a setxattr operation
* @dentry: file
* @name: xattr name
* @value: xattr value
* @size: xattr value size
* @flags: flags
*
* Update inode security field after successful setxattr operation.
*/
void security_inode_post_setxattr(struct dentry *dentry, const char *name,
const void *value, size_t size, int flags)
{
if (unlikely(IS_PRIVATE(d_backing_inode(dentry))))
return;
call_void_hook(inode_post_setxattr, dentry, name, value, size, flags);
evm_inode_post_setxattr(dentry, name, value, size);
}
/**
* security_inode_getxattr() - Check if xattr access is allowed
* @dentry: file
* @name: xattr name
*
* Check permission before obtaining the extended attributes identified by
* @name for @dentry.
*
* Return: Returns 0 if permission is granted.
*/
int security_inode_getxattr(struct dentry *dentry, const char *name)
{
if (unlikely(IS_PRIVATE(d_backing_inode(dentry))))
return 0;
return call_int_hook(inode_getxattr, 0, dentry, name);
}
/**
* security_inode_listxattr() - Check if listing xattrs is allowed
* @dentry: file
*
* Check permission before obtaining the list of extended attribute names for
* @dentry.
*
* Return: Returns 0 if permission is granted.
*/
int security_inode_listxattr(struct dentry *dentry)
{
if (unlikely(IS_PRIVATE(d_backing_inode(dentry))))
return 0;
return call_int_hook(inode_listxattr, 0, dentry);
}
/**
* security_inode_removexattr() - Check if removing an xattr is allowed
* @idmap: idmap of the mount
* @dentry: file
* @name: xattr name
*
* Check permission before removing the extended attribute identified by @name
* for @dentry.
*
* Return: Returns 0 if permission is granted.
*/
int security_inode_removexattr(struct mnt_idmap *idmap,
struct dentry *dentry, const char *name)
{
int ret;
if (unlikely(IS_PRIVATE(d_backing_inode(dentry))))
return 0;
/*
* SELinux and Smack integrate the cap call,
* so assume that all LSMs supplying this call do so.
*/
ret = call_int_hook(inode_removexattr, 1, idmap, dentry, name);
if (ret == 1)
ret = cap_inode_removexattr(idmap, dentry, name);
if (ret)
return ret;
ret = ima_inode_removexattr(dentry, name);
if (ret)
return ret;
return evm_inode_removexattr(idmap, dentry, name);
}
/**
* security_inode_need_killpriv() - Check if security_inode_killpriv() required
* @dentry: associated dentry
*
* Called when an inode has been changed to determine if
* security_inode_killpriv() should be called.
*
* Return: Return <0 on error to abort the inode change operation, return 0 if
* security_inode_killpriv() does not need to be called, return >0 if
* security_inode_killpriv() does need to be called.
*/
int security_inode_need_killpriv(struct dentry *dentry)
{
return call_int_hook(inode_need_killpriv, 0, dentry);
}
/**
* security_inode_killpriv() - The setuid bit is removed, update LSM state
* @idmap: idmap of the mount
* @dentry: associated dentry
*
* The @dentry's setuid bit is being removed. Remove similar security labels.
* Called with the dentry->d_inode->i_mutex held.
*
 * Return: Return 0 on success. If an error is returned, then the operation
 * causing the setuid bit removal fails.
*/
int security_inode_killpriv(struct mnt_idmap *idmap,
struct dentry *dentry)
{
return call_int_hook(inode_killpriv, 0, idmap, dentry);
}
/**
* security_inode_getsecurity() - Get the xattr security label of an inode
* @idmap: idmap of the mount
* @inode: inode
* @name: xattr name
* @buffer: security label buffer
* @alloc: allocation flag
*
* Retrieve a copy of the extended attribute representation of the security
* label associated with @name for @inode via @buffer. Note that @name is the
* remainder of the attribute name after the security prefix has been removed.
* @alloc is used to specify if the call should return a value via the buffer
* or just the value length.
*
* Return: Returns size of buffer on success.
*/
int security_inode_getsecurity(struct mnt_idmap *idmap,
struct inode *inode, const char *name,
void **buffer, bool alloc)
{
struct security_hook_list *hp;
int rc;
if (unlikely(IS_PRIVATE(inode)))
return LSM_RET_DEFAULT(inode_getsecurity);
/*
* Only one module will provide an attribute with a given name.
*/
hlist_for_each_entry(hp, &security_hook_heads.inode_getsecurity, list) {
rc = hp->hook.inode_getsecurity(idmap, inode, name, buffer,
alloc);
if (rc != LSM_RET_DEFAULT(inode_getsecurity))
return rc;
}
return LSM_RET_DEFAULT(inode_getsecurity);
}
/**
* security_inode_setsecurity() - Set the xattr security label of an inode
* @inode: inode
* @name: xattr name
* @value: security label
* @size: length of security label
* @flags: flags
*
* Set the security label associated with @name for @inode from the extended
* attribute value @value. @size indicates the size of the @value in bytes.
* @flags may be XATTR_CREATE, XATTR_REPLACE, or 0. Note that @name is the
* remainder of the attribute name after the security. prefix has been removed.
*
* Return: Returns 0 on success.
*/
int security_inode_setsecurity(struct inode *inode, const char *name,
const void *value, size_t size, int flags)
{
struct security_hook_list *hp;
int rc;
if (unlikely(IS_PRIVATE(inode)))
return LSM_RET_DEFAULT(inode_setsecurity);
/*
* Only one module will provide an attribute with a given name.
*/
hlist_for_each_entry(hp, &security_hook_heads.inode_setsecurity, list) {
rc = hp->hook.inode_setsecurity(inode, name, value, size,
flags);
if (rc != LSM_RET_DEFAULT(inode_setsecurity))
return rc;
}
return LSM_RET_DEFAULT(inode_setsecurity);
}
/**
* security_inode_listsecurity() - List the xattr security label names
* @inode: inode
* @buffer: buffer
* @buffer_size: size of buffer
*
* Copy the extended attribute names for the security labels associated with
* @inode into @buffer. The maximum size of @buffer is specified by
* @buffer_size. @buffer may be NULL to request the size of the buffer
* required.
*
* Return: Returns number of bytes used/required on success.
*/
int security_inode_listsecurity(struct inode *inode,
char *buffer, size_t buffer_size)
{
if (unlikely(IS_PRIVATE(inode)))
return 0;
return call_int_hook(inode_listsecurity, 0, inode, buffer, buffer_size);
}
EXPORT_SYMBOL(security_inode_listsecurity);
/**
* security_inode_getsecid() - Get an inode's secid
* @inode: inode
* @secid: secid to return
*
 * Get the secid associated with the inode. In case of failure, @secid will be
* set to zero.
*/
void security_inode_getsecid(struct inode *inode, u32 *secid)
{
call_void_hook(inode_getsecid, inode, secid);
}
/**
* security_inode_copy_up() - Create new creds for an overlayfs copy-up op
* @src: union dentry of copy-up file
* @new: newly created creds
*
* A file is about to be copied up from lower layer to upper layer of overlay
 * filesystem. The security module can prepare a set of new creds, modify them
 * as needed and return them. The caller will temporarily switch to the new
 * creds to create the new file and then release the newly allocated creds.
*
* Return: Returns 0 on success or a negative error code on error.
*/
int security_inode_copy_up(struct dentry *src, struct cred **new)
{
return call_int_hook(inode_copy_up, 0, src, new);
}
EXPORT_SYMBOL(security_inode_copy_up);
/**
* security_inode_copy_up_xattr() - Filter xattrs in an overlayfs copy-up op
* @name: xattr name
*
* Filter the xattrs being copied up when a unioned file is copied up from a
* lower layer to the union/overlay layer. The caller is responsible for
* reading and writing the xattrs, this hook is merely a filter.
*
* Return: Returns 0 to accept the xattr, 1 to discard the xattr, -EOPNOTSUPP
* if the security module does not know about attribute, or a negative
* error code to abort the copy up.
*/
int security_inode_copy_up_xattr(const char *name)
{
struct security_hook_list *hp;
int rc;
/*
* The implementation can return 0 (accept the xattr), 1 (discard the
* xattr), -EOPNOTSUPP if it does not know anything about the xattr or
* any other error code in case of an error.
*/
hlist_for_each_entry(hp,
&security_hook_heads.inode_copy_up_xattr, list) {
rc = hp->hook.inode_copy_up_xattr(name);
if (rc != LSM_RET_DEFAULT(inode_copy_up_xattr))
return rc;
}
return LSM_RET_DEFAULT(inode_copy_up_xattr);
}
EXPORT_SYMBOL(security_inode_copy_up_xattr);
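/*
 * Illustrative sketch (not compiled; the function and attribute names are
 * invented): a hypothetical LSM hook using the return convention documented
 * above -- discard one private xattr during copy-up and take no position on
 * everything else.
 */
#if 0
static int example_inode_copy_up_xattr(const char *name)
{
	if (!strcmp(name, "security.example"))
		return 1;		/* drop this xattr during copy-up */
	return -EOPNOTSUPP;		/* no opinion; let other LSMs decide */
}
#endif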
/**
* security_kernfs_init_security() - Init LSM context for a kernfs node
* @kn_dir: parent kernfs node
* @kn: the kernfs node to initialize
*
* Initialize the security context of a newly created kernfs node based on its
* own and its parent's attributes.
*
* Return: Returns 0 if permission is granted.
*/
int security_kernfs_init_security(struct kernfs_node *kn_dir,
struct kernfs_node *kn)
{
return call_int_hook(kernfs_init_security, 0, kn_dir, kn);
}
/**
* security_file_permission() - Check file permissions
* @file: file
* @mask: requested permissions
*
* Check file permissions before accessing an open file. This hook is called
* by various operations that read or write files. A security module can use
* this hook to perform additional checking on these operations, e.g. to
* revalidate permissions on use to support privilege bracketing or policy
* changes. Notice that this hook is used when the actual read/write
* operations are performed, whereas the inode_security_ops hook is called when
* a file is opened (as well as many other operations). Although this hook can
* be used to revalidate permissions for various system call operations that
* read or write files, it does not address the revalidation of permissions for
* memory-mapped files. Security modules must handle this separately if they
* need such revalidation.
*
* Return: Returns 0 if permission is granted.
*/
int security_file_permission(struct file *file, int mask)
{
int ret;
ret = call_int_hook(file_permission, 0, file, mask);
if (ret)
return ret;
return fsnotify_perm(file, mask);
}
/**
* security_file_alloc() - Allocate and init a file's LSM blob
* @file: the file
*
* Allocate and attach a security structure to the file->f_security field. The
* security field is initialized to NULL when the structure is first created.
*
* Return: Return 0 if the hook is successful and permission is granted.
*/
int security_file_alloc(struct file *file)
{
int rc = lsm_file_alloc(file);
if (rc)
return rc;
rc = call_int_hook(file_alloc_security, 0, file);
if (unlikely(rc))
security_file_free(file);
return rc;
}
/**
* security_file_free() - Free a file's LSM blob
* @file: the file
*
* Deallocate and free any security structures stored in file->f_security.
*/
void security_file_free(struct file *file)
{
void *blob;
call_void_hook(file_free_security, file);
blob = file->f_security;
if (blob) {
file->f_security = NULL;
kmem_cache_free(lsm_file_cache, blob);
}
}
/**
* security_file_ioctl() - Check if an ioctl is allowed
* @file: associated file
* @cmd: ioctl cmd
* @arg: ioctl arguments
*
* Check permission for an ioctl operation on @file. Note that @arg sometimes
* represents a user space pointer; in other cases, it may be a simple integer
* value. When @arg represents a user space pointer, it should never be used
* by the security module.
*
* Return: Returns 0 if permission is granted.
*/
int security_file_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
return call_int_hook(file_ioctl, 0, file, cmd, arg);
}
EXPORT_SYMBOL_GPL(security_file_ioctl);
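/*
 * Illustrative sketch (not compiled; the helper name is invented): most of
 * the one-line wrappers in this file dispatch through the call_int_hook()
 * macro defined earlier in security.c.  The function below is a hand-expanded,
 * simplified approximation of what security_file_ioctl() above does: walk
 * every registered hook, stop at the first non-zero return, otherwise fall
 * back to the default (0 here).  It illustrates the pattern only and is not
 * the macro's literal expansion.
 */
#if 0
static int example_file_ioctl_dispatch(struct file *file, unsigned int cmd,
					unsigned long arg)
{
	struct security_hook_list *hp;
	int rc = 0;	/* default result when no LSM implements the hook */

	hlist_for_each_entry(hp, &security_hook_heads.file_ioctl, list) {
		rc = hp->hook.file_ioctl(file, cmd, arg);
		if (rc != 0)
			break;	/* first non-zero result (usually a denial) wins */
	}
	return rc;
}
#endif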
static inline unsigned long mmap_prot(struct file *file, unsigned long prot)
{
/*
	 * Do we have PROT_READ and does the application expect
* it to imply PROT_EXEC? If not, nothing to talk about...
*/
if ((prot & (PROT_READ | PROT_EXEC)) != PROT_READ)
return prot;
if (!(current->personality & READ_IMPLIES_EXEC))
return prot;
/*
* if that's an anonymous mapping, let it.
*/
if (!file)
return prot | PROT_EXEC;
/*
* ditto if it's not on noexec mount, except that on !MMU we need
* NOMMU_MAP_EXEC (== VM_MAYEXEC) in this case
*/
if (!path_noexec(&file->f_path)) {
#ifndef CONFIG_MMU
if (file->f_op->mmap_capabilities) {
unsigned caps = file->f_op->mmap_capabilities(file);
if (!(caps & NOMMU_MAP_EXEC))
return prot;
}
#endif
return prot | PROT_EXEC;
}
/* anything on noexec mount won't get PROT_EXEC */
return prot;
}
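/*
 * Illustrative userspace sketch (not kernel code, not compiled here): with
 * the READ_IMPLIES_EXEC personality flag set, a plain PROT_READ file mapping
 * is presented to security_mmap_file() below as if PROT_EXEC had also been
 * requested, provided the file is not on a noexec mount.  The file path is
 * arbitrary.
 */
#if 0
#include <fcntl.h>
#include <sys/mman.h>
#include <sys/personality.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/etc/hostname", O_RDONLY);

	/* Add READ_IMPLIES_EXEC while keeping the current personality. */
	personality(personality(0xffffffff) | READ_IMPLIES_EXEC);
	/* LSMs now check this mapping as PROT_READ | PROT_EXEC. */
	mmap(NULL, 4096, PROT_READ, MAP_PRIVATE, fd, 0);
	close(fd);
	return 0;
}
#endif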
/**
* security_mmap_file() - Check if mmap'ing a file is allowed
* @file: file
* @prot: protection applied by the kernel
* @flags: flags
*
* Check permissions for a mmap operation. The @file may be NULL, e.g. if
* mapping anonymous memory.
*
* Return: Returns 0 if permission is granted.
*/
int security_mmap_file(struct file *file, unsigned long prot,
unsigned long flags)
{
unsigned long prot_adj = mmap_prot(file, prot);
int ret;
ret = call_int_hook(mmap_file, 0, file, prot, prot_adj, flags);
if (ret)
return ret;
return ima_file_mmap(file, prot, prot_adj, flags);
}
/**
* security_mmap_addr() - Check if mmap'ing an address is allowed
* @addr: address
*
* Check permissions for a mmap operation at @addr.
*
* Return: Returns 0 if permission is granted.
*/
int security_mmap_addr(unsigned long addr)
{
return call_int_hook(mmap_addr, 0, addr);
}
/**
* security_file_mprotect() - Check if changing memory protections is allowed
* @vma: memory region
* @reqprot: application requested protection
* @prot: protection applied by the kernel
*
* Check permissions before changing memory access permissions.
*
* Return: Returns 0 if permission is granted.
*/
int security_file_mprotect(struct vm_area_struct *vma, unsigned long reqprot,
unsigned long prot)
{
int ret;
ret = call_int_hook(file_mprotect, 0, vma, reqprot, prot);
if (ret)
return ret;
return ima_file_mprotect(vma, prot);
}
/**
* security_file_lock() - Check if a file lock is allowed
* @file: file
* @cmd: lock operation (e.g. F_RDLCK, F_WRLCK)
*
* Check permission before performing file locking operations. Note the hook
* mediates both flock and fcntl style locks.
*
* Return: Returns 0 if permission is granted.
*/
int security_file_lock(struct file *file, unsigned int cmd)
{
return call_int_hook(file_lock, 0, file, cmd);
}
/**
* security_file_fcntl() - Check if fcntl() op is allowed
* @file: file
* @cmd: fcntl command
* @arg: command argument
*
* Check permission before allowing the file operation specified by @cmd from
* being performed on the file @file. Note that @arg sometimes represents a
* user space pointer; in other cases, it may be a simple integer value. When
* @arg represents a user space pointer, it should never be used by the
* security module.
*
* Return: Returns 0 if permission is granted.
*/
int security_file_fcntl(struct file *file, unsigned int cmd, unsigned long arg)
{
return call_int_hook(file_fcntl, 0, file, cmd, arg);
}
/**
* security_file_set_fowner() - Set the file owner info in the LSM blob
* @file: the file
*
* Save owner security information (typically from current->security) in
* file->f_security for later use by the send_sigiotask hook.
*/
void security_file_set_fowner(struct file *file)
{
call_void_hook(file_set_fowner, file);
}
/**
* security_file_send_sigiotask() - Check if sending SIGIO/SIGURG is allowed
* @tsk: target task
* @fown: signal sender
* @sig: signal to be sent, SIGIO is sent if 0
*
* Check permission for the file owner @fown to send SIGIO or SIGURG to the
* process @tsk. Note that this hook is sometimes called from interrupt. Note
* that the fown_struct, @fown, is never outside the context of a struct file,
* so the file structure (and associated security information) can always be
* obtained: container_of(fown, struct file, f_owner).
*
* Return: Returns 0 if permission is granted.
*/
int security_file_send_sigiotask(struct task_struct *tsk,
struct fown_struct *fown, int sig)
{
return call_int_hook(file_send_sigiotask, 0, tsk, fown, sig);
}
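/*
 * Illustrative sketch (not compiled; the hook implementation is
 * hypothetical): as the comment above notes, @fown always lives inside a
 * struct file, so a hook can recover the file -- and with it the LSM blob in
 * file->f_security -- using container_of().
 */
#if 0
static int example_file_send_sigiotask(struct task_struct *tsk,
					struct fown_struct *fown, int sig)
{
	struct file *file = container_of(fown, struct file, f_owner);

	/* ...consult file->f_security and @tsk to decide, 0 to allow... */
	return 0;
}
#endif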
/**
 * security_file_receive() - Check if receiving a file via IPC is allowed
* @file: file being received
*
* This hook allows security modules to control the ability of a process to
* receive an open file descriptor via socket IPC.
*
* Return: Returns 0 if permission is granted.
*/
int security_file_receive(struct file *file)
{
return call_int_hook(file_receive, 0, file);
}
/**
 * security_file_open() - Save open() time state for later use by the LSM
 * @file: the file
*
* Save open-time permission checking state for later use upon file_permission,
* and recheck access if anything has changed since inode_permission.
*
* Return: Returns 0 if permission is granted.
*/
int security_file_open(struct file *file)
{
int ret;
ret = call_int_hook(file_open, 0, file);
if (ret)
return ret;
return fsnotify_perm(file, MAY_OPEN);
}
/**
* security_file_truncate() - Check if truncating a file is allowed
* @file: file
*
* Check permission before truncating a file, i.e. using ftruncate. Note that
* truncation permission may also be checked based on the path, using the
* @path_truncate hook.
*
* Return: Returns 0 if permission is granted.
*/
int security_file_truncate(struct file *file)
{
return call_int_hook(file_truncate, 0, file);
}
/**
* security_task_alloc() - Allocate a task's LSM blob
* @task: the task
* @clone_flags: flags indicating what is being shared
*
* Handle allocation of task-related resources.
*
* Return: Returns a zero on success, negative values on failure.
*/
int security_task_alloc(struct task_struct *task, unsigned long clone_flags)
{
int rc = lsm_task_alloc(task);
if (rc)
return rc;
rc = call_int_hook(task_alloc, 0, task, clone_flags);
if (unlikely(rc))
security_task_free(task);
return rc;
}
/**
* security_task_free() - Free a task's LSM blob and related resources
* @task: task
*
* Handle release of task-related resources. Note that this can be called from
* interrupt context.
*/
void security_task_free(struct task_struct *task)
{
call_void_hook(task_free, task);
kfree(task->security);
task->security = NULL;
}
/**
* security_cred_alloc_blank() - Allocate the min memory to allow cred_transfer
* @cred: credentials
* @gfp: gfp flags
*
* Only allocate sufficient memory and attach to @cred such that
* cred_transfer() will not get ENOMEM.
*
* Return: Returns 0 on success, negative values on failure.
*/
int security_cred_alloc_blank(struct cred *cred, gfp_t gfp)
{
int rc = lsm_cred_alloc(cred, gfp);
if (rc)
return rc;
rc = call_int_hook(cred_alloc_blank, 0, cred, gfp);
if (unlikely(rc))
security_cred_free(cred);
return rc;
}
/**
* security_cred_free() - Free the cred's LSM blob and associated resources
* @cred: credentials
*
* Deallocate and clear the cred->security field in a set of credentials.
*/
void security_cred_free(struct cred *cred)
{
/*
* There is a failure case in prepare_creds() that
* may result in a call here with ->security being NULL.
*/
if (unlikely(cred->security == NULL))
return;
call_void_hook(cred_free, cred);
kfree(cred->security);
cred->security = NULL;
}
/**
* security_prepare_creds() - Prepare a new set of credentials
* @new: new credentials
* @old: original credentials
* @gfp: gfp flags
*
* Prepare a new set of credentials by copying the data from the old set.
*
* Return: Returns 0 on success, negative values on failure.
*/
int security_prepare_creds(struct cred *new, const struct cred *old, gfp_t gfp)
{
int rc = lsm_cred_alloc(new, gfp);
if (rc)
return rc;
rc = call_int_hook(cred_prepare, 0, new, old, gfp);
if (unlikely(rc))
security_cred_free(new);
return rc;
}
/**
* security_transfer_creds() - Transfer creds
* @new: target credentials
* @old: original credentials
*
* Transfer data from original creds to new creds.
*/
void security_transfer_creds(struct cred *new, const struct cred *old)
{
call_void_hook(cred_transfer, new, old);
}
/**
* security_cred_getsecid() - Get the secid from a set of credentials
* @c: credentials
* @secid: secid value
*
* Retrieve the security identifier of the cred structure @c. In case of
* failure, @secid will be set to zero.
*/
void security_cred_getsecid(const struct cred *c, u32 *secid)
{
*secid = 0;
call_void_hook(cred_getsecid, c, secid);
}
EXPORT_SYMBOL(security_cred_getsecid);
/**
* security_kernel_act_as() - Set the kernel credentials to act as secid
* @new: credentials
* @secid: secid
*
* Set the credentials for a kernel service to act as (subjective context).
* The current task must be the one that nominated @secid.
*
* Return: Returns 0 if successful.
*/
int security_kernel_act_as(struct cred *new, u32 secid)
{
return call_int_hook(kernel_act_as, 0, new, secid);
}
/**
* security_kernel_create_files_as() - Set file creation context using an inode
* @new: target credentials
* @inode: reference inode
*
* Set the file creation context in a set of credentials to be the same as the
* objective context of the specified inode. The current task must be the one
* that nominated @inode.
*
* Return: Returns 0 if successful.
*/
int security_kernel_create_files_as(struct cred *new, struct inode *inode)
{
return call_int_hook(kernel_create_files_as, 0, new, inode);
}
/**
 * security_kernel_module_request() - Check if loading a module is allowed
* @kmod_name: module name
*
* Ability to trigger the kernel to automatically upcall to userspace for
* userspace to load a kernel module with the given name.
*
* Return: Returns 0 if successful.
*/
int security_kernel_module_request(char *kmod_name)
{
int ret;
ret = call_int_hook(kernel_module_request, 0, kmod_name);
if (ret)
return ret;
return integrity_kernel_module_request(kmod_name);
}
/**
* security_kernel_read_file() - Read a file specified by userspace
* @file: file
* @id: file identifier
 * @contents: true if security_kernel_post_read_file() will be called
*
* Read a file specified by userspace.
*
* Return: Returns 0 if permission is granted.
*/
int security_kernel_read_file(struct file *file, enum kernel_read_file_id id,
bool contents)
{
int ret;
ret = call_int_hook(kernel_read_file, 0, file, id, contents);
if (ret)
return ret;
return ima_read_file(file, id, contents);
}
EXPORT_SYMBOL_GPL(security_kernel_read_file);
/**
* security_kernel_post_read_file() - Read a file specified by userspace
* @file: file
* @buf: file contents
* @size: size of file contents
* @id: file identifier
*
 * Read a file specified by userspace. This must be paired with a prior
 * security_kernel_read_file() call that indicated this hook would also be
 * called; see security_kernel_read_file() for more information.
*
* Return: Returns 0 if permission is granted.
*/
int security_kernel_post_read_file(struct file *file, char *buf, loff_t size,
enum kernel_read_file_id id)
{
int ret;
ret = call_int_hook(kernel_post_read_file, 0, file, buf, size, id);
if (ret)
return ret;
return ima_post_read_file(file, buf, size, id);
}
EXPORT_SYMBOL_GPL(security_kernel_post_read_file);
/**
* security_kernel_load_data() - Load data provided by userspace
* @id: data identifier
* @contents: true if security_kernel_post_load_data() will be called
*
* Load data provided by userspace.
*
* Return: Returns 0 if permission is granted.
*/
int security_kernel_load_data(enum kernel_load_data_id id, bool contents)
{
int ret;
ret = call_int_hook(kernel_load_data, 0, id, contents);
if (ret)
return ret;
return ima_load_data(id, contents);
}
EXPORT_SYMBOL_GPL(security_kernel_load_data);
/**
* security_kernel_post_load_data() - Load userspace data from a non-file source
* @buf: data
* @size: size of data
* @id: data identifier
* @description: text description of data, specific to the id value
*
* Load data provided by a non-file source (usually userspace buffer). This
* must be paired with a prior security_kernel_load_data() call that indicated
* this hook would also be called, see security_kernel_load_data() for more
* information.
*
* Return: Returns 0 if permission is granted.
*/
int security_kernel_post_load_data(char *buf, loff_t size,
enum kernel_load_data_id id,
char *description)
{
int ret;
ret = call_int_hook(kernel_post_load_data, 0, buf, size, id,
description);
if (ret)
return ret;
return ima_post_load_data(buf, size, id, description);
}
EXPORT_SYMBOL_GPL(security_kernel_post_load_data);
/**
* security_task_fix_setuid() - Update LSM with new user id attributes
* @new: updated credentials
* @old: credentials being replaced
* @flags: LSM_SETID_* flag values
*
* Update the module's state after setting one or more of the user identity
* attributes of the current process. The @flags parameter indicates which of
 * the set*uid system calls invoked this hook. @new is the set of
 * credentials that will be installed. Modifications should be made to this
* rather than to @current->cred.
*
* Return: Returns 0 on success.
*/
int security_task_fix_setuid(struct cred *new, const struct cred *old,
int flags)
{
return call_int_hook(task_fix_setuid, 0, new, old, flags);
}
/**
* security_task_fix_setgid() - Update LSM with new group id attributes
* @new: updated credentials
* @old: credentials being replaced
* @flags: LSM_SETID_* flag value
*
* Update the module's state after setting one or more of the group identity
* attributes of the current process. The @flags parameter indicates which of
* the set*gid system calls invoked this hook. @new is the set of credentials
* that will be installed. Modifications should be made to this rather than to
* @current->cred.
*
* Return: Returns 0 on success.
*/
int security_task_fix_setgid(struct cred *new, const struct cred *old,
int flags)
{
return call_int_hook(task_fix_setgid, 0, new, old, flags);
}
/**
* security_task_fix_setgroups() - Update LSM with new supplementary groups
* @new: updated credentials
* @old: credentials being replaced
*
* Update the module's state after setting the supplementary group identity
* attributes of the current process. @new is the set of credentials that will
* be installed. Modifications should be made to this rather than to
* @current->cred.
*
* Return: Returns 0 on success.
*/
int security_task_fix_setgroups(struct cred *new, const struct cred *old)
{
return call_int_hook(task_fix_setgroups, 0, new, old);
}
/**
* security_task_setpgid() - Check if setting the pgid is allowed
* @p: task being modified
* @pgid: new pgid
*
* Check permission before setting the process group identifier of the process
* @p to @pgid.
*
* Return: Returns 0 if permission is granted.
*/
int security_task_setpgid(struct task_struct *p, pid_t pgid)
{
return call_int_hook(task_setpgid, 0, p, pgid);
}
/**
* security_task_getpgid() - Check if getting the pgid is allowed
* @p: task
*
* Check permission before getting the process group identifier of the process
* @p.
*
* Return: Returns 0 if permission is granted.
*/
int security_task_getpgid(struct task_struct *p)
{
return call_int_hook(task_getpgid, 0, p);
}
/**
* security_task_getsid() - Check if getting the session id is allowed
* @p: task
*
* Check permission before getting the session identifier of the process @p.
*
* Return: Returns 0 if permission is granted.
*/
int security_task_getsid(struct task_struct *p)
{
return call_int_hook(task_getsid, 0, p);
}
/**
* security_current_getsecid_subj() - Get the current task's subjective secid
* @secid: secid value
*
* Retrieve the subjective security identifier of the current task and return
* it in @secid. In case of failure, @secid will be set to zero.
*/
void security_current_getsecid_subj(u32 *secid)
{
*secid = 0;
call_void_hook(current_getsecid_subj, secid);
}
EXPORT_SYMBOL(security_current_getsecid_subj);
/**
* security_task_getsecid_obj() - Get a task's objective secid
* @p: target task
* @secid: secid value
*
* Retrieve the objective security identifier of the task_struct in @p and
* return it in @secid. In case of failure, @secid will be set to zero.
*/
void security_task_getsecid_obj(struct task_struct *p, u32 *secid)
{
*secid = 0;
call_void_hook(task_getsecid_obj, p, secid);
}
EXPORT_SYMBOL(security_task_getsecid_obj);
/**
* security_task_setnice() - Check if setting a task's nice value is allowed
* @p: target task
* @nice: nice value
*
* Check permission before setting the nice value of @p to @nice.
*
* Return: Returns 0 if permission is granted.
*/
int security_task_setnice(struct task_struct *p, int nice)
{
return call_int_hook(task_setnice, 0, p, nice);
}
/**
* security_task_setioprio() - Check if setting a task's ioprio is allowed
* @p: target task
* @ioprio: ioprio value
*
* Check permission before setting the ioprio value of @p to @ioprio.
*
* Return: Returns 0 if permission is granted.
*/
int security_task_setioprio(struct task_struct *p, int ioprio)
{
return call_int_hook(task_setioprio, 0, p, ioprio);
}
/**
* security_task_getioprio() - Check if getting a task's ioprio is allowed
* @p: task
*
* Check permission before getting the ioprio value of @p.
*
* Return: Returns 0 if permission is granted.
*/
int security_task_getioprio(struct task_struct *p)
{
return call_int_hook(task_getioprio, 0, p);
}
/**
* security_task_prlimit() - Check if get/setting resources limits is allowed
* @cred: current task credentials
* @tcred: target task credentials
* @flags: LSM_PRLIMIT_* flag bits indicating a get/set/both
*
* Check permission before getting and/or setting the resource limits of
* another task.
*
* Return: Returns 0 if permission is granted.
*/
int security_task_prlimit(const struct cred *cred, const struct cred *tcred,
unsigned int flags)
{
return call_int_hook(task_prlimit, 0, cred, tcred, flags);
}
/**
* security_task_setrlimit() - Check if setting a new rlimit value is allowed
* @p: target task's group leader
* @resource: resource whose limit is being set
* @new_rlim: new resource limit
*
* Check permission before setting the resource limits of process @p for
* @resource to @new_rlim. The old resource limit values can be examined by
* dereferencing (p->signal->rlim + resource).
*
* Return: Returns 0 if permission is granted.
*/
int security_task_setrlimit(struct task_struct *p, unsigned int resource,
struct rlimit *new_rlim)
{
return call_int_hook(task_setrlimit, 0, p, resource, new_rlim);
}
/**
* security_task_setscheduler() - Check if setting sched policy/param is allowed
* @p: target task
*
* Check permission before setting scheduling policy and/or parameters of
* process @p.
*
* Return: Returns 0 if permission is granted.
*/
int security_task_setscheduler(struct task_struct *p)
{
return call_int_hook(task_setscheduler, 0, p);
}
/**
* security_task_getscheduler() - Check if getting scheduling info is allowed
* @p: target task
*
* Check permission before obtaining scheduling information for process @p.
*
* Return: Returns 0 if permission is granted.
*/
int security_task_getscheduler(struct task_struct *p)
{
return call_int_hook(task_getscheduler, 0, p);
}
/**
* security_task_movememory() - Check if moving memory is allowed
* @p: task
*
* Check permission before moving memory owned by process @p.
*
* Return: Returns 0 if permission is granted.
*/
int security_task_movememory(struct task_struct *p)
{
return call_int_hook(task_movememory, 0, p);
}
/**
* security_task_kill() - Check if sending a signal is allowed
* @p: target process
* @info: signal information
* @sig: signal value
* @cred: credentials of the signal sender, NULL if @current
*
* Check permission before sending signal @sig to @p. @info can be NULL, the
* constant 1, or a pointer to a kernel_siginfo structure. If @info is 1 or
* SI_FROMKERNEL(info) is true, then the signal should be viewed as coming from
* the kernel and should typically be permitted. SIGIO signals are handled
* separately by the send_sigiotask hook in file_security_ops.
*
* Return: Returns 0 if permission is granted.
*/
int security_task_kill(struct task_struct *p, struct kernel_siginfo *info,
int sig, const struct cred *cred)
{
return call_int_hook(task_kill, 0, p, info, sig, cred);
}
/**
* security_task_prctl() - Check if a prctl op is allowed
* @option: operation
* @arg2: argument
* @arg3: argument
* @arg4: argument
* @arg5: argument
*
* Check permission before performing a process control operation on the
* current process.
*
* Return: Return -ENOSYS if no-one wanted to handle this op, any other value
* to cause prctl() to return immediately with that value.
*/
int security_task_prctl(int option, unsigned long arg2, unsigned long arg3,
unsigned long arg4, unsigned long arg5)
{
int thisrc;
int rc = LSM_RET_DEFAULT(task_prctl);
struct security_hook_list *hp;
hlist_for_each_entry(hp, &security_hook_heads.task_prctl, list) {
thisrc = hp->hook.task_prctl(option, arg2, arg3, arg4, arg5);
if (thisrc != LSM_RET_DEFAULT(task_prctl)) {
rc = thisrc;
if (thisrc != 0)
break;
}
}
return rc;
}
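/*
 * Illustrative sketch (not compiled; the option number and function name are
 * invented): a hypothetical LSM task_prctl hook following the convention the
 * loop above relies on -- handle the options it cares about and return
 * -ENOSYS (the hook's default) for everything else, so that other modules
 * and the core prctl() code still get a chance to act.
 */
#if 0
static int example_task_prctl(int option, unsigned long arg2,
			      unsigned long arg3, unsigned long arg4,
			      unsigned long arg5)
{
	if (option != 0x53454301)	/* invented private option number */
		return -ENOSYS;		/* not ours; keep looking */

	/* ...handle the private option, returning 0 on success... */
	return 0;
}
#endif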
/**
* security_task_to_inode() - Set the security attributes of a task's inode
* @p: task
* @inode: inode
*
* Set the security attributes for an inode based on an associated task's
* security attributes, e.g. for /proc/pid inodes.
*/
void security_task_to_inode(struct task_struct *p, struct inode *inode)
{
call_void_hook(task_to_inode, p, inode);
}
/**
* security_create_user_ns() - Check if creating a new userns is allowed
* @cred: prepared creds
*
* Check permission prior to creating a new user namespace.
*
* Return: Returns 0 if successful, otherwise < 0 error code.
*/
int security_create_user_ns(const struct cred *cred)
{
return call_int_hook(userns_create, 0, cred);
}
/**
* security_ipc_permission() - Check if sysv ipc access is allowed
* @ipcp: ipc permission structure
* @flag: requested permissions
*
* Check permissions for access to IPC.
*
* Return: Returns 0 if permission is granted.
*/
int security_ipc_permission(struct kern_ipc_perm *ipcp, short flag)
{
return call_int_hook(ipc_permission, 0, ipcp, flag);
}
/**
* security_ipc_getsecid() - Get the sysv ipc object's secid
* @ipcp: ipc permission structure
* @secid: secid pointer
*
* Get the secid associated with the ipc object. In case of failure, @secid
* will be set to zero.
*/
void security_ipc_getsecid(struct kern_ipc_perm *ipcp, u32 *secid)
{
*secid = 0;
call_void_hook(ipc_getsecid, ipcp, secid);
}
/**
* security_msg_msg_alloc() - Allocate a sysv ipc message LSM blob
* @msg: message structure
*
* Allocate and attach a security structure to the msg->security field. The
* security field is initialized to NULL when the structure is first created.
*
* Return: Return 0 if operation was successful and permission is granted.
*/
int security_msg_msg_alloc(struct msg_msg *msg)
{
int rc = lsm_msg_msg_alloc(msg);
if (unlikely(rc))
return rc;
rc = call_int_hook(msg_msg_alloc_security, 0, msg);
if (unlikely(rc))
security_msg_msg_free(msg);
return rc;
}
/**
* security_msg_msg_free() - Free a sysv ipc message LSM blob
* @msg: message structure
*
* Deallocate the security structure for this message.
*/
void security_msg_msg_free(struct msg_msg *msg)
{
call_void_hook(msg_msg_free_security, msg);
kfree(msg->security);
msg->security = NULL;
}
/**
* security_msg_queue_alloc() - Allocate a sysv ipc msg queue LSM blob
* @msq: sysv ipc permission structure
*
 * Allocate and attach a security structure to @msq. The security field is
* initialized to NULL when the structure is first created.
*
* Return: Returns 0 if operation was successful and permission is granted.
*/
int security_msg_queue_alloc(struct kern_ipc_perm *msq)
{
int rc = lsm_ipc_alloc(msq);
if (unlikely(rc))
return rc;
rc = call_int_hook(msg_queue_alloc_security, 0, msq);
if (unlikely(rc))
security_msg_queue_free(msq);
return rc;
}
/**
* security_msg_queue_free() - Free a sysv ipc msg queue LSM blob
* @msq: sysv ipc permission structure
*
 * Deallocate the security field @msq->security for the message queue.
*/
void security_msg_queue_free(struct kern_ipc_perm *msq)
{
call_void_hook(msg_queue_free_security, msq);
kfree(msq->security);
msq->security = NULL;
}
/**
* security_msg_queue_associate() - Check if a msg queue operation is allowed
* @msq: sysv ipc permission structure
* @msqflg: operation flags
*
* Check permission when a message queue is requested through the msgget system
* call. This hook is only called when returning the message queue identifier
* for an existing message queue, not when a new message queue is created.
*
* Return: Return 0 if permission is granted.
*/
int security_msg_queue_associate(struct kern_ipc_perm *msq, int msqflg)
{
return call_int_hook(msg_queue_associate, 0, msq, msqflg);
}
/**
* security_msg_queue_msgctl() - Check if a msg queue operation is allowed
* @msq: sysv ipc permission structure
* @cmd: operation
*
* Check permission when a message control operation specified by @cmd is to be
 * performed on the message queue @msq.
*
* Return: Returns 0 if permission is granted.
*/
int security_msg_queue_msgctl(struct kern_ipc_perm *msq, int cmd)
{
return call_int_hook(msg_queue_msgctl, 0, msq, cmd);
}
/**
* security_msg_queue_msgsnd() - Check if sending a sysv ipc message is allowed
* @msq: sysv ipc permission structure
* @msg: message
* @msqflg: operation flags
*
* Check permission before a message, @msg, is enqueued on the message queue
* with permissions specified in @msq.
*
* Return: Returns 0 if permission is granted.
*/
int security_msg_queue_msgsnd(struct kern_ipc_perm *msq,
struct msg_msg *msg, int msqflg)
{
return call_int_hook(msg_queue_msgsnd, 0, msq, msg, msqflg);
}
/**
* security_msg_queue_msgrcv() - Check if receiving a sysv ipc msg is allowed
* @msq: sysv ipc permission structure
* @msg: message
* @target: target task
* @type: type of message requested
* @mode: operation flags
*
* Check permission before a message, @msg, is removed from the message queue.
* The @target task structure contains a pointer to the process that will be
* receiving the message (not equal to the current process when inline receives
* are being performed).
*
* Return: Returns 0 if permission is granted.
*/
int security_msg_queue_msgrcv(struct kern_ipc_perm *msq, struct msg_msg *msg,
struct task_struct *target, long type, int mode)
{
return call_int_hook(msg_queue_msgrcv, 0, msq, msg, target, type, mode);
}
/**
* security_shm_alloc() - Allocate a sysv shm LSM blob
* @shp: sysv ipc permission structure
*
* Allocate and attach a security structure to the @shp security field. The
* security field is initialized to NULL when the structure is first created.
*
* Return: Returns 0 if operation was successful and permission is granted.
*/
int security_shm_alloc(struct kern_ipc_perm *shp)
{
int rc = lsm_ipc_alloc(shp);
if (unlikely(rc))
return rc;
rc = call_int_hook(shm_alloc_security, 0, shp);
if (unlikely(rc))
security_shm_free(shp);
return rc;
}
/**
* security_shm_free() - Free a sysv shm LSM blob
* @shp: sysv ipc permission structure
*
 * Deallocate the security structure @shp->security for the memory segment.
*/
void security_shm_free(struct kern_ipc_perm *shp)
{
call_void_hook(shm_free_security, shp);
kfree(shp->security);
shp->security = NULL;
}
/**
* security_shm_associate() - Check if a sysv shm operation is allowed
* @shp: sysv ipc permission structure
* @shmflg: operation flags
*
* Check permission when a shared memory region is requested through the shmget
* system call. This hook is only called when returning the shared memory
* region identifier for an existing region, not when a new shared memory
* region is created.
*
* Return: Returns 0 if permission is granted.
*/
int security_shm_associate(struct kern_ipc_perm *shp, int shmflg)
{
return call_int_hook(shm_associate, 0, shp, shmflg);
}
/**
* security_shm_shmctl() - Check if a sysv shm operation is allowed
* @shp: sysv ipc permission structure
* @cmd: operation
*
* Check permission when a shared memory control operation specified by @cmd is
* to be performed on the shared memory region with permissions in @shp.
*
* Return: Return 0 if permission is granted.
*/
int security_shm_shmctl(struct kern_ipc_perm *shp, int cmd)
{
return call_int_hook(shm_shmctl, 0, shp, cmd);
}
/**
* security_shm_shmat() - Check if a sysv shm attach operation is allowed
* @shp: sysv ipc permission structure
* @shmaddr: address of memory region to attach
* @shmflg: operation flags
*
* Check permissions prior to allowing the shmat system call to attach the
* shared memory segment with permissions @shp to the data segment of the
* calling process. The attaching address is specified by @shmaddr.
*
* Return: Returns 0 if permission is granted.
*/
int security_shm_shmat(struct kern_ipc_perm *shp,
char __user *shmaddr, int shmflg)
{
return call_int_hook(shm_shmat, 0, shp, shmaddr, shmflg);
}
/**
* security_sem_alloc() - Allocate a sysv semaphore LSM blob
* @sma: sysv ipc permission structure
*
* Allocate and attach a security structure to the @sma security field. The
* security field is initialized to NULL when the structure is first created.
*
* Return: Returns 0 if operation was successful and permission is granted.
*/
int security_sem_alloc(struct kern_ipc_perm *sma)
{
int rc = lsm_ipc_alloc(sma);
if (unlikely(rc))
return rc;
rc = call_int_hook(sem_alloc_security, 0, sma);
if (unlikely(rc))
security_sem_free(sma);
return rc;
}
/**
* security_sem_free() - Free a sysv semaphore LSM blob
* @sma: sysv ipc permission structure
*
* Deallocate security structure @sma->security for the semaphore.
*/
void security_sem_free(struct kern_ipc_perm *sma)
{
call_void_hook(sem_free_security, sma);
kfree(sma->security);
sma->security = NULL;
}
/**
* security_sem_associate() - Check if a sysv semaphore operation is allowed
* @sma: sysv ipc permission structure
* @semflg: operation flags
*
* Check permission when a semaphore is requested through the semget system
* call. This hook is only called when returning the semaphore identifier for
* an existing semaphore, not when a new one must be created.
*
* Return: Returns 0 if permission is granted.
*/
int security_sem_associate(struct kern_ipc_perm *sma, int semflg)
{
return call_int_hook(sem_associate, 0, sma, semflg);
}
/**
* security_sem_semctl() - Check if a sysv semaphore operation is allowed
* @sma: sysv ipc permission structure
* @cmd: operation
*
* Check permission when a semaphore operation specified by @cmd is to be
* performed on the semaphore.
*
* Return: Returns 0 if permission is granted.
*/
int security_sem_semctl(struct kern_ipc_perm *sma, int cmd)
{
return call_int_hook(sem_semctl, 0, sma, cmd);
}
/**
* security_sem_semop() - Check if a sysv semaphore operation is allowed
* @sma: sysv ipc permission structure
* @sops: operations to perform
* @nsops: number of operations
* @alter: flag indicating changes will be made
*
* Check permissions before performing operations on members of the semaphore
* set. If the @alter flag is nonzero, the semaphore set may be modified.
*
* Return: Returns 0 if permission is granted.
*/
int security_sem_semop(struct kern_ipc_perm *sma, struct sembuf *sops,
unsigned nsops, int alter)
{
return call_int_hook(sem_semop, 0, sma, sops, nsops, alter);
}
/**
* security_d_instantiate() - Populate an inode's LSM state based on a dentry
* @dentry: dentry
* @inode: inode
*
* Fill in @inode security information for a @dentry if allowed.
*/
void security_d_instantiate(struct dentry *dentry, struct inode *inode)
{
if (unlikely(inode && IS_PRIVATE(inode)))
return;
call_void_hook(d_instantiate, dentry, inode);
}
EXPORT_SYMBOL(security_d_instantiate);
/**
* security_getprocattr() - Read an attribute for a task
* @p: the task
* @lsm: LSM name
* @name: attribute name
* @value: attribute value
*
* Read attribute @name for task @p and store it into @value if allowed.
*
* Return: Returns the length of @value on success, a negative value otherwise.
*/
int security_getprocattr(struct task_struct *p, const char *lsm,
const char *name, char **value)
{
struct security_hook_list *hp;
hlist_for_each_entry(hp, &security_hook_heads.getprocattr, list) {
if (lsm != NULL && strcmp(lsm, hp->lsm))
continue;
return hp->hook.getprocattr(p, name, value);
}
return LSM_RET_DEFAULT(getprocattr);
}
/**
* security_setprocattr() - Set an attribute for a task
* @lsm: LSM name
* @name: attribute name
* @value: attribute value
* @size: attribute value size
*
* Write (set) the current task's attribute @name to @value, size @size if
* allowed.
*
* Return: Returns bytes written on success, a negative value otherwise.
*/
int security_setprocattr(const char *lsm, const char *name, void *value,
size_t size)
{
struct security_hook_list *hp;
hlist_for_each_entry(hp, &security_hook_heads.setprocattr, list) {
if (lsm != NULL && strcmp(lsm, hp->lsm))
continue;
return hp->hook.setprocattr(name, value, size);
}
return LSM_RET_DEFAULT(setprocattr);
}
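/*
 * Illustrative userspace sketch (not kernel code, not compiled here): the
 * two hooks above back the /proc/<pid>/attr/ interface.  Reading
 * /proc/self/attr/current ends up in security_getprocattr() and writing it
 * ends up in security_setprocattr(); the per-LSM subdirectories (e.g.
 * /proc/self/attr/<lsm>/current) select a specific module via @lsm.
 */
#if 0
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	char label[256];
	int fd = open("/proc/self/attr/current", O_RDONLY);
	ssize_t n = read(fd, label, sizeof(label) - 1);

	if (n > 0) {
		label[n] = '\0';
		printf("current task label: %s\n", label);
	}
	close(fd);
	return 0;
}
#endif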
/**
* security_netlink_send() - Save info and check if netlink sending is allowed
* @sk: sending socket
* @skb: netlink message
*
* Save security information for a netlink message so that permission checking
* can be performed when the message is processed. The security information
* can be saved using the eff_cap field of the netlink_skb_parms structure.
* Also may be used to provide fine grained control over message transmission.
*
* Return: Returns 0 if the information was successfully saved and message is
* allowed to be transmitted.
*/
int security_netlink_send(struct sock *sk, struct sk_buff *skb)
{
return call_int_hook(netlink_send, 0, sk, skb);
}
/**
 * security_ismaclabel() - Check if the named attribute is a MAC label
* @name: full extended attribute name
*
* Check if the extended attribute specified by @name represents a MAC label.
*
* Return: Returns 1 if name is a MAC attribute otherwise returns 0.
*/
int security_ismaclabel(const char *name)
{
return call_int_hook(ismaclabel, 0, name);
}
EXPORT_SYMBOL(security_ismaclabel);
/**
* security_secid_to_secctx() - Convert a secid to a secctx
* @secid: secid
* @secdata: secctx
* @seclen: secctx length
*
* Convert secid to security context. If @secdata is NULL the length of the
* result will be returned in @seclen, but no @secdata will be returned. This
* does mean that the length could change between calls to check the length and
* the next call which actually allocates and returns the @secdata.
*
* Return: Return 0 on success, error on failure.
*/
int security_secid_to_secctx(u32 secid, char **secdata, u32 *seclen)
{
struct security_hook_list *hp;
int rc;
/*
* Currently, only one LSM can implement secid_to_secctx (i.e this
* LSM hook is not "stackable").
*/
hlist_for_each_entry(hp, &security_hook_heads.secid_to_secctx, list) {
rc = hp->hook.secid_to_secctx(secid, secdata, seclen);
if (rc != LSM_RET_DEFAULT(secid_to_secctx))
return rc;
}
return LSM_RET_DEFAULT(secid_to_secctx);
}
EXPORT_SYMBOL(security_secid_to_secctx);
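/*
 * Illustrative sketch (not compiled; the helper name is invented): callers
 * that turn a secid into a printable context are expected to pair this call
 * with security_release_secctx() once the buffer is no longer needed.
 */
#if 0
static void example_log_secid(u32 secid)
{
	char *ctx;
	u32 len;

	if (security_secid_to_secctx(secid, &ctx, &len))
		return;		/* no LSM provided a context */
	pr_info("security context: %.*s\n", (int)len, ctx);
	security_release_secctx(ctx, len);
}
#endif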
/**
* security_secctx_to_secid() - Convert a secctx to a secid
* @secdata: secctx
* @seclen: length of secctx
* @secid: secid
*
* Convert security context to secid.
*
* Return: Returns 0 on success, error on failure.
*/
int security_secctx_to_secid(const char *secdata, u32 seclen, u32 *secid)
{
*secid = 0;
return call_int_hook(secctx_to_secid, 0, secdata, seclen, secid);
}
EXPORT_SYMBOL(security_secctx_to_secid);
/**
* security_release_secctx() - Free a secctx buffer
* @secdata: secctx
* @seclen: length of secctx
*
* Release the security context.
*/
void security_release_secctx(char *secdata, u32 seclen)
{
call_void_hook(release_secctx, secdata, seclen);
}
EXPORT_SYMBOL(security_release_secctx);
/**
* security_inode_invalidate_secctx() - Invalidate an inode's security label
* @inode: inode
*
* Notify the security module that it must revalidate the security context of
* an inode.
*/
void security_inode_invalidate_secctx(struct inode *inode)
{
call_void_hook(inode_invalidate_secctx, inode);
}
EXPORT_SYMBOL(security_inode_invalidate_secctx);
/**
 * security_inode_notifysecctx() - Notify the LSM of an inode's security label
* @inode: inode
* @ctx: secctx
* @ctxlen: length of secctx
*
* Notify the security module of what the security context of an inode should
* be. Initializes the incore security context managed by the security module
* for this inode. Example usage: NFS client invokes this hook to initialize
* the security context in its incore inode to the value provided by the server
* for the file when the server returned the file's attributes to the client.
* Must be called with inode->i_mutex locked.
*
* Return: Returns 0 on success, error on failure.
*/
int security_inode_notifysecctx(struct inode *inode, void *ctx, u32 ctxlen)
{
return call_int_hook(inode_notifysecctx, 0, inode, ctx, ctxlen);
}
EXPORT_SYMBOL(security_inode_notifysecctx);
/**
* security_inode_setsecctx() - Change the security label of an inode
* @dentry: inode
* @ctx: secctx
* @ctxlen: length of secctx
*
* Change the security context of an inode. Updates the incore security
* context managed by the security module and invokes the fs code as needed
* (via __vfs_setxattr_noperm) to update any backing xattrs that represent the
* context. Example usage: NFS server invokes this hook to change the security
* context in its incore inode and on the backing filesystem to a value
* provided by the client on a SETATTR operation. Must be called with
* inode->i_mutex locked.
*
* Return: Returns 0 on success, error on failure.
*/
int security_inode_setsecctx(struct dentry *dentry, void *ctx, u32 ctxlen)
{
return call_int_hook(inode_setsecctx, 0, dentry, ctx, ctxlen);
}
EXPORT_SYMBOL(security_inode_setsecctx);
/**
* security_inode_getsecctx() - Get the security label of an inode
* @inode: inode
* @ctx: secctx
* @ctxlen: length of secctx
*
* On success, returns 0 and fills out @ctx and @ctxlen with the security
* context for the given @inode.
*
* Return: Returns 0 on success, error on failure.
*/
int security_inode_getsecctx(struct inode *inode, void **ctx, u32 *ctxlen)
{
return call_int_hook(inode_getsecctx, -EOPNOTSUPP, inode, ctx, ctxlen);
}
EXPORT_SYMBOL(security_inode_getsecctx);
#ifdef CONFIG_WATCH_QUEUE
/**
* security_post_notification() - Check if a watch notification can be posted
* @w_cred: credentials of the task that set the watch
* @cred: credentials of the task which triggered the watch
* @n: the notification
*
* Check to see if a watch notification can be posted to a particular queue.
*
* Return: Returns 0 if permission is granted.
*/
int security_post_notification(const struct cred *w_cred,
const struct cred *cred,
struct watch_notification *n)
{
return call_int_hook(post_notification, 0, w_cred, cred, n);
}
#endif /* CONFIG_WATCH_QUEUE */
#ifdef CONFIG_KEY_NOTIFICATIONS
/**
* security_watch_key() - Check if a task is allowed to watch for key events
* @key: the key to watch
*
* Check to see if a process is allowed to watch for event notifications from
* a key or keyring.
*
* Return: Returns 0 if permission is granted.
*/
int security_watch_key(struct key *key)
{
return call_int_hook(watch_key, 0, key);
}
#endif /* CONFIG_KEY_NOTIFICATIONS */
#ifdef CONFIG_SECURITY_NETWORK
/**
* security_unix_stream_connect() - Check if a AF_UNIX stream is allowed
* @sock: originating sock
* @other: peer sock
* @newsk: new sock
*
* Check permissions before establishing a Unix domain stream connection
* between @sock and @other.
*
* The @unix_stream_connect and @unix_may_send hooks were necessary because
* Linux provides an alternative to the conventional file name space for Unix
* domain sockets. Whereas binding and connecting to sockets in the file name
* space is mediated by the typical file permissions (and caught by the mknod
* and permission hooks in inode_security_ops), binding and connecting to
* sockets in the abstract name space is completely unmediated. Sufficient
* control of Unix domain sockets in the abstract name space isn't possible
* using only the socket layer hooks, since we need to know the actual target
* socket, which is not looked up until we are inside the af_unix code.
*
* Return: Returns 0 if permission is granted.
*/
int security_unix_stream_connect(struct sock *sock, struct sock *other,
struct sock *newsk)
{
return call_int_hook(unix_stream_connect, 0, sock, other, newsk);
}
EXPORT_SYMBOL(security_unix_stream_connect);
/**
* security_unix_may_send() - Check if AF_UNIX socket can send datagrams
* @sock: originating sock
* @other: peer sock
*
* Check permissions before connecting or sending datagrams from @sock to
* @other.
*
* The @unix_stream_connect and @unix_may_send hooks were necessary because
* Linux provides an alternative to the conventional file name space for Unix
* domain sockets. Whereas binding and connecting to sockets in the file name
* space is mediated by the typical file permissions (and caught by the mknod
* and permission hooks in inode_security_ops), binding and connecting to
* sockets in the abstract name space is completely unmediated. Sufficient
* control of Unix domain sockets in the abstract name space isn't possible
* using only the socket layer hooks, since we need to know the actual target
* socket, which is not looked up until we are inside the af_unix code.
*
* Return: Returns 0 if permission is granted.
*/
int security_unix_may_send(struct socket *sock, struct socket *other)
{
return call_int_hook(unix_may_send, 0, sock, other);
}
EXPORT_SYMBOL(security_unix_may_send);
/**
* security_socket_create() - Check if creating a new socket is allowed
* @family: protocol family
* @type: communications type
* @protocol: requested protocol
* @kern: set to 1 if a kernel socket is requested
*
* Check permissions prior to creating a new socket.
*
* Return: Returns 0 if permission is granted.
*/
int security_socket_create(int family, int type, int protocol, int kern)
{
return call_int_hook(socket_create, 0, family, type, protocol, kern);
}
/**
* security_socket_post_create() - Initialize a newly created socket
* @sock: socket
* @family: protocol family
* @type: communications type
* @protocol: requested protocol
* @kern: set to 1 if a kernel socket is requested
*
* This hook allows a module to update or allocate a per-socket security
* structure. Note that the security field was not added directly to the socket
* structure, but rather, the socket security information is stored in the
* associated inode. Typically, the inode alloc_security hook will allocate
* and attach security information to SOCK_INODE(sock)->i_security. This hook
* may be used to update the SOCK_INODE(sock)->i_security field with additional
* information that wasn't available when the inode was allocated.
*
* Return: Returns 0 if permission is granted.
*/
int security_socket_post_create(struct socket *sock, int family,
int type, int protocol, int kern)
{
return call_int_hook(socket_post_create, 0, sock, family, type,
protocol, kern);
}
/**
* security_socket_socketpair() - Check if creating a socketpair is allowed
* @socka: first socket
* @sockb: second socket
*
* Check permissions before creating a fresh pair of sockets.
*
* Return: Returns 0 if permission is granted and the connection was
* established.
*/
int security_socket_socketpair(struct socket *socka, struct socket *sockb)
{
return call_int_hook(socket_socketpair, 0, socka, sockb);
}
EXPORT_SYMBOL(security_socket_socketpair);
/**
* security_socket_bind() - Check if a socket bind operation is allowed
* @sock: socket
* @address: requested bind address
* @addrlen: length of address
*
* Check permission before socket protocol layer bind operation is performed
* and the socket @sock is bound to the address specified in the @address
* parameter.
*
* Return: Returns 0 if permission is granted.
*/
int security_socket_bind(struct socket *sock,
struct sockaddr *address, int addrlen)
{
return call_int_hook(socket_bind, 0, sock, address, addrlen);
}
/**
* security_socket_connect() - Check if a socket connect operation is allowed
* @sock: socket
* @address: address of remote connection point
* @addrlen: length of address
*
* Check permission before socket protocol layer connect operation attempts to
* connect socket @sock to a remote address, @address.
*
* Return: Returns 0 if permission is granted.
*/
int security_socket_connect(struct socket *sock,
struct sockaddr *address, int addrlen)
{
return call_int_hook(socket_connect, 0, sock, address, addrlen);
}
/**
* security_socket_listen() - Check if a socket is allowed to listen
* @sock: socket
* @backlog: connection queue size
*
* Check permission before socket protocol layer listen operation.
*
* Return: Returns 0 if permission is granted.
*/
int security_socket_listen(struct socket *sock, int backlog)
{
return call_int_hook(socket_listen, 0, sock, backlog);
}
/**
* security_socket_accept() - Check if a socket is allowed to accept connections
* @sock: listening socket
 * @newsock: newly created connection socket
*
* Check permission before accepting a new connection. Note that the new
* socket, @newsock, has been created and some information copied to it, but
* the accept operation has not actually been performed.
*
* Return: Returns 0 if permission is granted.
*/
int security_socket_accept(struct socket *sock, struct socket *newsock)
{
return call_int_hook(socket_accept, 0, sock, newsock);
}
/**
 * security_socket_sendmsg() - Check if sending a message is allowed
* @sock: sending socket
* @msg: message to send
* @size: size of message
*
* Check permission before transmitting a message to another socket.
*
* Return: Returns 0 if permission is granted.
*/
int security_socket_sendmsg(struct socket *sock, struct msghdr *msg, int size)
{
return call_int_hook(socket_sendmsg, 0, sock, msg, size);
}
/**
* security_socket_recvmsg() - Check if receiving a message is allowed
* @sock: receiving socket
* @msg: message to receive
* @size: size of message
* @flags: operational flags
*
* Check permission before receiving a message from a socket.
*
* Return: Returns 0 if permission is granted.
*/
int security_socket_recvmsg(struct socket *sock, struct msghdr *msg,
int size, int flags)
{
return call_int_hook(socket_recvmsg, 0, sock, msg, size, flags);
}
/**
* security_socket_getsockname() - Check if reading the socket addr is allowed
* @sock: socket
*
* Check permission before reading the local address (name) of the socket
* object.
*
* Return: Returns 0 if permission is granted.
*/
int security_socket_getsockname(struct socket *sock)
{
return call_int_hook(socket_getsockname, 0, sock);
}
/**
* security_socket_getpeername() - Check if reading the peer's addr is allowed
* @sock: socket
*
 * Check permission before reading the remote address (name) of a socket
 * object.
*
* Return: Returns 0 if permission is granted.
*/
int security_socket_getpeername(struct socket *sock)
{
return call_int_hook(socket_getpeername, 0, sock);
}
/**
* security_socket_getsockopt() - Check if reading a socket option is allowed
* @sock: socket
* @level: option's protocol level
* @optname: option name
*
* Check permissions before retrieving the options associated with socket
* @sock.
*
* Return: Returns 0 if permission is granted.
*/
int security_socket_getsockopt(struct socket *sock, int level, int optname)
{
return call_int_hook(socket_getsockopt, 0, sock, level, optname);
}
/**
* security_socket_setsockopt() - Check if setting a socket option is allowed
* @sock: socket
* @level: option's protocol level
* @optname: option name
*
* Check permissions before setting the options associated with socket @sock.
*
* Return: Returns 0 if permission is granted.
*/
int security_socket_setsockopt(struct socket *sock, int level, int optname)
{
return call_int_hook(socket_setsockopt, 0, sock, level, optname);
}
/**
* security_socket_shutdown() - Checks if shutting down the socket is allowed
* @sock: socket
* @how: flag indicating how sends and receives are handled
*
* Checks permission before all or part of a connection on the socket @sock is
* shut down.
*
* Return: Returns 0 if permission is granted.
*/
int security_socket_shutdown(struct socket *sock, int how)
{
return call_int_hook(socket_shutdown, 0, sock, how);
}
/**
* security_sock_rcv_skb() - Check if an incoming network packet is allowed
* @sk: destination sock
* @skb: incoming packet
*
* Check permissions on incoming network packets. This hook is distinct from
* Netfilter's IP input hooks since it is the first time that the incoming
* sk_buff @skb has been associated with a particular socket, @sk. Must not
* sleep inside this hook because some callers hold spinlocks.
*
* Return: Returns 0 if permission is granted.
*/
int security_sock_rcv_skb(struct sock *sk, struct sk_buff *skb)
{
return call_int_hook(socket_sock_rcv_skb, 0, sk, skb);
}
EXPORT_SYMBOL(security_sock_rcv_skb);
/**
* security_socket_getpeersec_stream() - Get the remote peer label
* @sock: socket
* @optval: destination buffer
* @optlen: size of peer label copied into the buffer
* @len: maximum size of the destination buffer
*
* This hook allows the security module to provide peer socket security state
 * for unix or connected tcp sockets to userspace via getsockopt SO_PEERSEC.
* For tcp sockets this can be meaningful if the socket is associated with an
* ipsec SA.
*
* Return: Returns 0 if all is well, otherwise, typical getsockopt return
* values.
*/
int security_socket_getpeersec_stream(struct socket *sock, sockptr_t optval,
sockptr_t optlen, unsigned int len)
{
return call_int_hook(socket_getpeersec_stream, -ENOPROTOOPT, sock,
optval, optlen, len);
}
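/*
 * Illustrative userspace sketch (not kernel code, not compiled here): for a
 * connected stream or unix socket, this hook is what ultimately services
 * getsockopt(SOL_SOCKET, SO_PEERSEC), which returns the peer's security
 * context as a string.
 */
#if 0
#include <stdio.h>
#include <sys/socket.h>

static void print_peer_label(int fd)
{
	char label[256];
	socklen_t len = sizeof(label);

	if (getsockopt(fd, SOL_SOCKET, SO_PEERSEC, label, &len) == 0)
		printf("peer label: %.*s\n", (int)len, label);
}
#endif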
/**
* security_socket_getpeersec_dgram() - Get the remote peer label
* @sock: socket
* @skb: datagram packet
* @secid: remote peer label secid
*
* This hook allows the security module to provide peer socket security state
 * for udp sockets on a per-packet basis to userspace. The application must
 * first have enabled the IP_PASSSEC option via setsockopt. It can then
 * retrieve the security state returned by
* this hook for a packet via the SCM_SECURITY ancillary message type.
*
* Return: Returns 0 on success, error on failure.
*/
int security_socket_getpeersec_dgram(struct socket *sock,
struct sk_buff *skb, u32 *secid)
{
return call_int_hook(socket_getpeersec_dgram, -ENOPROTOOPT, sock,
skb, secid);
}
EXPORT_SYMBOL(security_socket_getpeersec_dgram);
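/*
 * Illustrative userspace sketch (not kernel code, not compiled here): after
 * enabling IP_PASSSEC on a UDP socket, each received datagram carries an
 * SCM_SECURITY control message whose payload comes from this hook.  Error
 * handling is omitted; the SCM_SECURITY fallback define uses the value from
 * the kernel's linux/socket.h in case the libc headers do not expose it, and
 * IPPROTO_IP (== SOL_IP) is used as the control-message level.
 */
#if 0
#include <netinet/in.h>
#include <stdio.h>
#include <sys/socket.h>
#include <sys/uio.h>

#ifndef SCM_SECURITY
#define SCM_SECURITY 0x03
#endif

static void recv_with_label(int fd)
{
	char data[512], cbuf[256];
	struct iovec iov = { .iov_base = data, .iov_len = sizeof(data) };
	struct msghdr msg = {
		.msg_iov = &iov, .msg_iovlen = 1,
		.msg_control = cbuf, .msg_controllen = sizeof(cbuf),
	};
	struct cmsghdr *cmsg;
	int one = 1;

	setsockopt(fd, IPPROTO_IP, IP_PASSSEC, &one, sizeof(one));
	recvmsg(fd, &msg, 0);
	for (cmsg = CMSG_FIRSTHDR(&msg); cmsg; cmsg = CMSG_NXTHDR(&msg, cmsg)) {
		if (cmsg->cmsg_level == IPPROTO_IP &&
		    cmsg->cmsg_type == SCM_SECURITY)
			printf("datagram label: %.*s\n",
			       (int)(cmsg->cmsg_len - CMSG_LEN(0)),
			       (char *)CMSG_DATA(cmsg));
	}
}
#endif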
/**
* security_sk_alloc() - Allocate and initialize a sock's LSM blob
* @sk: sock
* @family: protocol family
* @priority: gfp flags
*
* Allocate and attach a security structure to the sk->sk_security field, which
* is used to copy security attributes between local stream sockets.
*
* Return: Returns 0 on success, error on failure.
*/
int security_sk_alloc(struct sock *sk, int family, gfp_t priority)
{
return call_int_hook(sk_alloc_security, 0, sk, family, priority);
}
/**
* security_sk_free() - Free the sock's LSM blob
* @sk: sock
*
* Deallocate security structure.
*/
void security_sk_free(struct sock *sk)
{
call_void_hook(sk_free_security, sk);
}
/**
* security_sk_clone() - Clone a sock's LSM state
* @sk: original sock
* @newsk: target sock
*
* Clone/copy security structure.
*/
void security_sk_clone(const struct sock *sk, struct sock *newsk)
{
call_void_hook(sk_clone_security, sk, newsk);
}
EXPORT_SYMBOL(security_sk_clone);
/**
* security_sk_classify_flow() - Set a flow's secid based on socket
* @sk: original socket
* @flic: target flow
*
* Set the target flow's secid to socket's secid.
*/
void security_sk_classify_flow(const struct sock *sk, struct flowi_common *flic)
{
call_void_hook(sk_getsecid, sk, &flic->flowic_secid);
}
EXPORT_SYMBOL(security_sk_classify_flow);
/**
* security_req_classify_flow() - Set a flow's secid based on request_sock
* @req: request_sock
* @flic: target flow
*
* Sets @flic's secid to @req's secid.
*/
void security_req_classify_flow(const struct request_sock *req,
struct flowi_common *flic)
{
call_void_hook(req_classify_flow, req, flic);
}
EXPORT_SYMBOL(security_req_classify_flow);
/**
* security_sock_graft() - Reconcile LSM state when grafting a sock on a socket
* @sk: sock being grafted
* @parent: target parent socket
*
* Sets @parent's inode secid to @sk's secid and update @sk with any necessary
* LSM state from @parent.
*/
void security_sock_graft(struct sock *sk, struct socket *parent)
{
call_void_hook(sock_graft, sk, parent);
}
EXPORT_SYMBOL(security_sock_graft);
/**
* security_inet_conn_request() - Set request_sock state using incoming connect
* @sk: parent listening sock
* @skb: incoming connection
* @req: new request_sock
*
* Initialize the @req LSM state based on @sk and the incoming connect in @skb.
*
* Return: Returns 0 if permission is granted.
*/
int security_inet_conn_request(const struct sock *sk,
struct sk_buff *skb, struct request_sock *req)
{
return call_int_hook(inet_conn_request, 0, sk, skb, req);
}
EXPORT_SYMBOL(security_inet_conn_request);
/**
* security_inet_csk_clone() - Set new sock LSM state based on request_sock
* @newsk: new sock
* @req: connection request_sock
*
 * Set the LSM state of @newsk using the LSM state from @req.
*/
void security_inet_csk_clone(struct sock *newsk,
const struct request_sock *req)
{
call_void_hook(inet_csk_clone, newsk, req);
}
/**
* security_inet_conn_established() - Update sock's LSM state with connection
* @sk: sock
* @skb: connection packet
*
 * Update @sk's LSM state to represent a new connection from @skb.
*/
void security_inet_conn_established(struct sock *sk,
struct sk_buff *skb)
{
call_void_hook(inet_conn_established, sk, skb);
}
EXPORT_SYMBOL(security_inet_conn_established);
/**
* security_secmark_relabel_packet() - Check if setting a secmark is allowed
* @secid: new secmark value
*
* Check if the process should be allowed to relabel packets to @secid.
*
* Return: Returns 0 if permission is granted.
*/
int security_secmark_relabel_packet(u32 secid)
{
return call_int_hook(secmark_relabel_packet, 0, secid);
}
EXPORT_SYMBOL(security_secmark_relabel_packet);
/**
* security_secmark_refcount_inc() - Increment the secmark labeling rule count
*
* Tells the LSM to increment the number of secmark labeling rules loaded.
*/
void security_secmark_refcount_inc(void)
{
call_void_hook(secmark_refcount_inc);
}
EXPORT_SYMBOL(security_secmark_refcount_inc);
/**
* security_secmark_refcount_dec() - Decrement the secmark labeling rule count
*
* Tells the LSM to decrement the number of secmark labeling rules loaded.
*/
void security_secmark_refcount_dec(void)
{
call_void_hook(secmark_refcount_dec);
}
EXPORT_SYMBOL(security_secmark_refcount_dec);
/**
* security_tun_dev_alloc_security() - Allocate a LSM blob for a TUN device
* @security: pointer to the LSM blob
*
* This hook allows a module to allocate a security structure for a TUN device,
* returning the pointer in @security.
*
* Return: Returns a zero on success, negative values on failure.
*/
int security_tun_dev_alloc_security(void **security)
{
return call_int_hook(tun_dev_alloc_security, 0, security);
}
EXPORT_SYMBOL(security_tun_dev_alloc_security);
/**
* security_tun_dev_free_security() - Free a TUN device LSM blob
* @security: LSM blob
*
* This hook allows a module to free the security structure for a TUN device.
*/
void security_tun_dev_free_security(void *security)
{
call_void_hook(tun_dev_free_security, security);
}
EXPORT_SYMBOL(security_tun_dev_free_security);
/**
* security_tun_dev_create() - Check if creating a TUN device is allowed
*
* Check permissions prior to creating a new TUN device.
*
* Return: Returns 0 if permission is granted.
*/
int security_tun_dev_create(void)
{
return call_int_hook(tun_dev_create, 0);
}
EXPORT_SYMBOL(security_tun_dev_create);
/**
* security_tun_dev_attach_queue() - Check if attaching a TUN queue is allowed
* @security: TUN device LSM blob
*
* Check permissions prior to attaching to a TUN device queue.
*
* Return: Returns 0 if permission is granted.
*/
int security_tun_dev_attach_queue(void *security)
{
return call_int_hook(tun_dev_attach_queue, 0, security);
}
EXPORT_SYMBOL(security_tun_dev_attach_queue);
/**
* security_tun_dev_attach() - Update TUN device LSM state on attach
* @sk: associated sock
* @security: TUN device LSM blob
*
* This hook can be used by the module to update any security state associated
* with the TUN device's sock structure.
*
* Return: Returns 0 if permission is granted.
*/
int security_tun_dev_attach(struct sock *sk, void *security)
{
return call_int_hook(tun_dev_attach, 0, sk, security);
}
EXPORT_SYMBOL(security_tun_dev_attach);
/**
* security_tun_dev_open() - Update TUN device LSM state on open
* @security: TUN device LSM blob
*
* This hook can be used by the module to update any security state associated
* with the TUN device's security structure.
*
* Return: Returns 0 if permission is granted.
*/
int security_tun_dev_open(void *security)
{
return call_int_hook(tun_dev_open, 0, security);
}
EXPORT_SYMBOL(security_tun_dev_open);
/**
* security_sctp_assoc_request() - Update the LSM on a SCTP association req
* @asoc: SCTP association
* @skb: packet requesting the association
*
* Passes the @asoc and @chunk->skb of the association INIT packet to the LSM.
*
* Return: Returns 0 on success, error on failure.
*/
int security_sctp_assoc_request(struct sctp_association *asoc,
struct sk_buff *skb)
{
return call_int_hook(sctp_assoc_request, 0, asoc, skb);
}
EXPORT_SYMBOL(security_sctp_assoc_request);
/**
* security_sctp_bind_connect() - Validate a list of addrs for a SCTP option
* @sk: socket
* @optname: SCTP option to validate
* @address: list of IP addresses to validate
* @addrlen: length of the address list
*
 * Validate permissions required for each address associated with sock @sk.
* Depending on @optname, the addresses will be treated as either a connect or
* bind service. The @addrlen is calculated on each IPv4 and IPv6 address using
* sizeof(struct sockaddr_in) or sizeof(struct sockaddr_in6).
*
* Return: Returns 0 on success, error on failure.
*/
int security_sctp_bind_connect(struct sock *sk, int optname,
struct sockaddr *address, int addrlen)
{
return call_int_hook(sctp_bind_connect, 0, sk, optname,
address, addrlen);
}
EXPORT_SYMBOL(security_sctp_bind_connect);
/**
* security_sctp_sk_clone() - Clone a SCTP sock's LSM state
* @asoc: SCTP association
* @sk: original sock
* @newsk: target sock
*
* Called whenever a new socket is created by accept(2) (i.e. a TCP style
 * socket) or when a socket is 'peeled off', e.g. when userspace calls
* sctp_peeloff(3).
*/
void security_sctp_sk_clone(struct sctp_association *asoc, struct sock *sk,
struct sock *newsk)
{
call_void_hook(sctp_sk_clone, asoc, sk, newsk);
}
EXPORT_SYMBOL(security_sctp_sk_clone);
/**
* security_sctp_assoc_established() - Update LSM state when assoc established
* @asoc: SCTP association
* @skb: packet establishing the association
*
* Passes the @asoc and @chunk->skb of the association COOKIE_ACK packet to the
* security module.
*
* Return: Returns 0 if permission is granted.
*/
int security_sctp_assoc_established(struct sctp_association *asoc,
struct sk_buff *skb)
{
return call_int_hook(sctp_assoc_established, 0, asoc, skb);
}
EXPORT_SYMBOL(security_sctp_assoc_established);
/**
* security_mptcp_add_subflow() - Inherit the LSM label from the MPTCP socket
* @sk: the owning MPTCP socket
* @ssk: the new subflow
*
* Update the labeling for the given MPTCP subflow, to match the one of the
* owning MPTCP socket. This hook has to be called after the socket creation and
* initialization via the security_socket_create() and
* security_socket_post_create() LSM hooks.
*
* Return: Returns 0 on success or a negative error code on failure.
*/
int security_mptcp_add_subflow(struct sock *sk, struct sock *ssk)
{
return call_int_hook(mptcp_add_subflow, 0, sk, ssk);
}
#endif /* CONFIG_SECURITY_NETWORK */
#ifdef CONFIG_SECURITY_INFINIBAND
/**
* security_ib_pkey_access() - Check if access to an IB pkey is allowed
* @sec: LSM blob
* @subnet_prefix: subnet prefix of the port
* @pkey: IB pkey
*
* Check permission to access a pkey when modifying a QP.
*
* Return: Returns 0 if permission is granted.
*/
int security_ib_pkey_access(void *sec, u64 subnet_prefix, u16 pkey)
{
return call_int_hook(ib_pkey_access, 0, sec, subnet_prefix, pkey);
}
EXPORT_SYMBOL(security_ib_pkey_access);
/**
* security_ib_endport_manage_subnet() - Check if SMPs traffic is allowed
* @sec: LSM blob
* @dev_name: IB device name
* @port_num: port number
*
* Check permissions to send and receive SMPs on a end port.
*
* Return: Returns 0 if permission is granted.
*/
int security_ib_endport_manage_subnet(void *sec,
const char *dev_name, u8 port_num)
{
return call_int_hook(ib_endport_manage_subnet, 0, sec,
dev_name, port_num);
}
EXPORT_SYMBOL(security_ib_endport_manage_subnet);
/**
* security_ib_alloc_security() - Allocate an Infiniband LSM blob
* @sec: LSM blob
*
* Allocate a security structure for Infiniband objects.
*
* Return: Returns 0 on success, non-zero on failure.
*/
int security_ib_alloc_security(void **sec)
{
return call_int_hook(ib_alloc_security, 0, sec);
}
EXPORT_SYMBOL(security_ib_alloc_security);
/**
* security_ib_free_security() - Free an Infiniband LSM blob
* @sec: LSM blob
*
* Deallocate an Infiniband security structure.
*/
void security_ib_free_security(void *sec)
{
call_void_hook(ib_free_security, sec);
}
EXPORT_SYMBOL(security_ib_free_security);
#endif /* CONFIG_SECURITY_INFINIBAND */
#ifdef CONFIG_SECURITY_NETWORK_XFRM
/**
* security_xfrm_policy_alloc() - Allocate a xfrm policy LSM blob
* @ctxp: xfrm security context being added to the SPD
* @sec_ctx: security label provided by userspace
* @gfp: gfp flags
*
* Allocate a security structure to the xp->security field; the security field
* is initialized to NULL when the xfrm_policy is allocated.
*
* Return: Return 0 if operation was successful.
*/
int security_xfrm_policy_alloc(struct xfrm_sec_ctx **ctxp,
struct xfrm_user_sec_ctx *sec_ctx,
gfp_t gfp)
{
return call_int_hook(xfrm_policy_alloc_security, 0, ctxp, sec_ctx, gfp);
}
EXPORT_SYMBOL(security_xfrm_policy_alloc);
/**
* security_xfrm_policy_clone() - Clone xfrm policy LSM state
* @old_ctx: xfrm security context
* @new_ctxp: target xfrm security context
*
* Allocate a security structure in new_ctxp that contains the information from
* the old_ctx structure.
*
* Return: Return 0 if operation was successful.
*/
int security_xfrm_policy_clone(struct xfrm_sec_ctx *old_ctx,
struct xfrm_sec_ctx **new_ctxp)
{
return call_int_hook(xfrm_policy_clone_security, 0, old_ctx, new_ctxp);
}
/**
* security_xfrm_policy_free() - Free a xfrm security context
* @ctx: xfrm security context
*
* Free LSM resources associated with @ctx.
*/
void security_xfrm_policy_free(struct xfrm_sec_ctx *ctx)
{
call_void_hook(xfrm_policy_free_security, ctx);
}
EXPORT_SYMBOL(security_xfrm_policy_free);
/**
* security_xfrm_policy_delete() - Check if deleting a xfrm policy is allowed
* @ctx: xfrm security context
*
* Authorize deletion of a SPD entry.
*
* Return: Returns 0 if permission is granted.
*/
int security_xfrm_policy_delete(struct xfrm_sec_ctx *ctx)
{
return call_int_hook(xfrm_policy_delete_security, 0, ctx);
}
/**
* security_xfrm_state_alloc() - Allocate a xfrm state LSM blob
* @x: xfrm state being added to the SAD
* @sec_ctx: security label provided by userspace
*
* Allocate a security structure to the @x->security field; the security field
* is initialized to NULL when the xfrm_state is allocated. Set the context to
* correspond to @sec_ctx.
*
* Return: Return 0 if operation was successful.
*/
int security_xfrm_state_alloc(struct xfrm_state *x,
struct xfrm_user_sec_ctx *sec_ctx)
{
return call_int_hook(xfrm_state_alloc, 0, x, sec_ctx);
}
EXPORT_SYMBOL(security_xfrm_state_alloc);
/**
* security_xfrm_state_alloc_acquire() - Allocate a xfrm state LSM blob
* @x: xfrm state being added to the SAD
* @polsec: associated policy's security context
* @secid: secid from the flow
*
* Allocate a security structure to the x->security field; the security field
* is initialized to NULL when the xfrm_state is allocated. Set the context to
* correspond to secid.
*
* Return: Returns 0 if operation was successful.
*/
int security_xfrm_state_alloc_acquire(struct xfrm_state *x,
struct xfrm_sec_ctx *polsec, u32 secid)
{
return call_int_hook(xfrm_state_alloc_acquire, 0, x, polsec, secid);
}
/**
* security_xfrm_state_delete() - Check if deleting a xfrm state is allowed
* @x: xfrm state
*
* Authorize deletion of x->security.
*
* Return: Returns 0 if permission is granted.
*/
int security_xfrm_state_delete(struct xfrm_state *x)
{
return call_int_hook(xfrm_state_delete_security, 0, x);
}
EXPORT_SYMBOL(security_xfrm_state_delete);
/**
* security_xfrm_state_free() - Free a xfrm state
* @x: xfrm state
*
* Deallocate x->security.
*/
void security_xfrm_state_free(struct xfrm_state *x)
{
call_void_hook(xfrm_state_free_security, x);
}
/**
* security_xfrm_policy_lookup() - Check if using a xfrm policy is allowed
* @ctx: target xfrm security context
* @fl_secid: flow secid used to authorize access
*
* Check permission when a flow selects a xfrm_policy for processing XFRMs on a
* packet. The hook is called when selecting either a per-socket policy or a
* generic xfrm policy.
*
* Return: Return 0 if permission is granted, -ESRCH otherwise, or -errno on
* other errors.
*/
int security_xfrm_policy_lookup(struct xfrm_sec_ctx *ctx, u32 fl_secid)
{
return call_int_hook(xfrm_policy_lookup, 0, ctx, fl_secid);
}
/**
* security_xfrm_state_pol_flow_match() - Check for a xfrm match
* @x: xfrm state to match
* @xp: xfrm policy to check for a match
* @flic: flow to check for a match.
*
* Check @xp and @flic for a match with @x.
*
* Return: Returns 1 if there is a match.
*/
int security_xfrm_state_pol_flow_match(struct xfrm_state *x,
struct xfrm_policy *xp,
const struct flowi_common *flic)
{
struct security_hook_list *hp;
int rc = LSM_RET_DEFAULT(xfrm_state_pol_flow_match);
/*
* Since this function is expected to return 0 or 1, the judgment
* becomes difficult if multiple LSMs supply this call. Fortunately,
* we can use the first LSM's judgment because currently only SELinux
* supplies this call.
*
* For speed optimization, we explicitly break the loop rather than
 * using the macro.
*/
hlist_for_each_entry(hp, &security_hook_heads.xfrm_state_pol_flow_match,
list) {
rc = hp->hook.xfrm_state_pol_flow_match(x, xp, flic);
break;
}
return rc;
}
/**
* security_xfrm_decode_session() - Determine the xfrm secid for a packet
* @skb: xfrm packet
* @secid: secid
*
* Decode the packet in @skb and return the security label in @secid.
*
* Return: Return 0 if all xfrms used have the same secid.
*/
int security_xfrm_decode_session(struct sk_buff *skb, u32 *secid)
{
return call_int_hook(xfrm_decode_session, 0, skb, secid, 1);
}
void security_skb_classify_flow(struct sk_buff *skb, struct flowi_common *flic)
{
int rc = call_int_hook(xfrm_decode_session, 0, skb, &flic->flowic_secid,
0);
BUG_ON(rc);
}
EXPORT_SYMBOL(security_skb_classify_flow);
#endif /* CONFIG_SECURITY_NETWORK_XFRM */
#ifdef CONFIG_KEYS
/**
* security_key_alloc() - Allocate and initialize a kernel key LSM blob
* @key: key
* @cred: credentials
* @flags: allocation flags
*
 * Permit allocation of a key and assign security data. Note that the key does not
* have a serial number assigned at this point.
*
* Return: Return 0 if permission is granted, -ve error otherwise.
*/
int security_key_alloc(struct key *key, const struct cred *cred,
unsigned long flags)
{
return call_int_hook(key_alloc, 0, key, cred, flags);
}
/**
* security_key_free() - Free a kernel key LSM blob
* @key: key
*
* Notification of destruction; free security data.
*/
void security_key_free(struct key *key)
{
call_void_hook(key_free, key);
}
/**
* security_key_permission() - Check if a kernel key operation is allowed
* @key_ref: key reference
* @cred: credentials of actor requesting access
* @need_perm: requested permissions
*
* See whether a specific operational right is granted to a process on a key.
*
* Return: Return 0 if permission is granted, -ve error otherwise.
*/
int security_key_permission(key_ref_t key_ref, const struct cred *cred,
enum key_need_perm need_perm)
{
return call_int_hook(key_permission, 0, key_ref, cred, need_perm);
}
/**
* security_key_getsecurity() - Get the key's security label
* @key: key
* @buffer: security label buffer
*
* Get a textual representation of the security context attached to a key for
* the purposes of honouring KEYCTL_GETSECURITY. This function allocates the
* storage for the NUL-terminated string and the caller should free it.
*
* Return: Returns the length of @buffer (including terminating NUL) or -ve if
* an error occurs. May also return 0 (and a NULL buffer pointer) if
* there is no security label assigned to the key.
*/
int security_key_getsecurity(struct key *key, char **buffer)
{
*buffer = NULL;
return call_int_hook(key_getsecurity, 0, key, buffer);
}
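/*
 * Illustrative userspace sketch: the label built here is what
 * KEYCTL_GET_SECURITY hands back to the caller. With libkeyutils
 * (assumed to be available) the lookup is roughly:
 *
 *	char label[256];
 *	long n = keyctl_get_security(key, label, sizeof(label));
 *
 *	if (n >= 0)
 *		printf("key %d: %s\n", key, label);
 */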
#endif /* CONFIG_KEYS */
#ifdef CONFIG_AUDIT
/**
* security_audit_rule_init() - Allocate and init an LSM audit rule struct
* @field: audit action
* @op: rule operator
* @rulestr: rule context
* @lsmrule: receive buffer for audit rule struct
*
* Allocate and initialize an LSM audit rule structure.
*
* Return: Return 0 if @lsmrule has been successfully set, -EINVAL in case of
* an invalid rule.
*/
int security_audit_rule_init(u32 field, u32 op, char *rulestr, void **lsmrule)
{
return call_int_hook(audit_rule_init, 0, field, op, rulestr, lsmrule);
}
/**
* security_audit_rule_known() - Check if an audit rule contains LSM fields
* @krule: audit rule
*
 * Specifies whether the given @krule contains any fields related to the current
* LSM.
*
* Return: Returns 1 in case of relation found, 0 otherwise.
*/
int security_audit_rule_known(struct audit_krule *krule)
{
return call_int_hook(audit_rule_known, 0, krule);
}
/**
* security_audit_rule_free() - Free an LSM audit rule struct
* @lsmrule: audit rule struct
*
* Deallocate the LSM audit rule structure previously allocated by
* audit_rule_init().
*/
void security_audit_rule_free(void *lsmrule)
{
call_void_hook(audit_rule_free, lsmrule);
}
/**
* security_audit_rule_match() - Check if a label matches an audit rule
* @secid: security label
* @field: LSM audit field
* @op: matching operator
* @lsmrule: audit rule
*
 * Determine if the given @secid matches a rule previously approved by
* security_audit_rule_known().
*
* Return: Returns 1 if secid matches the rule, 0 if it does not, -ERRNO on
* failure.
*/
int security_audit_rule_match(u32 secid, u32 field, u32 op, void *lsmrule)
{
return call_int_hook(audit_rule_match, 0, secid, field, op, lsmrule);
}
#endif /* CONFIG_AUDIT */
#ifdef CONFIG_BPF_SYSCALL
/**
* security_bpf() - Check if the bpf syscall operation is allowed
* @cmd: command
* @attr: bpf attribute
* @size: size
*
 * Do an initial check for all bpf syscalls after the attribute is copied into
 * the kernel. The actual security module can implement its own rules to
 * check the specific cmd it needs.
*
* Return: Returns 0 if permission is granted.
*/
int security_bpf(int cmd, union bpf_attr *attr, unsigned int size)
{
return call_int_hook(bpf, 0, cmd, attr, size);
}
/**
* security_bpf_map() - Check if access to a bpf map is allowed
* @map: bpf map
* @fmode: mode
*
* Do a check when the kernel generates and returns a file descriptor for eBPF
* maps.
*
* Return: Returns 0 if permission is granted.
*/
int security_bpf_map(struct bpf_map *map, fmode_t fmode)
{
return call_int_hook(bpf_map, 0, map, fmode);
}
/**
* security_bpf_prog() - Check if access to a bpf program is allowed
* @prog: bpf program
*
* Do a check when the kernel generates and returns a file descriptor for eBPF
* programs.
*
* Return: Returns 0 if permission is granted.
*/
int security_bpf_prog(struct bpf_prog *prog)
{
return call_int_hook(bpf_prog, 0, prog);
}
/**
* security_bpf_map_alloc() - Allocate a bpf map LSM blob
* @map: bpf map
*
* Initialize the security field inside bpf map.
*
* Return: Returns 0 on success, error on failure.
*/
int security_bpf_map_alloc(struct bpf_map *map)
{
return call_int_hook(bpf_map_alloc_security, 0, map);
}
/**
* security_bpf_prog_alloc() - Allocate a bpf program LSM blob
* @aux: bpf program aux info struct
*
* Initialize the security field inside bpf program.
*
* Return: Returns 0 on success, error on failure.
*/
int security_bpf_prog_alloc(struct bpf_prog_aux *aux)
{
return call_int_hook(bpf_prog_alloc_security, 0, aux);
}
/**
* security_bpf_map_free() - Free a bpf map's LSM blob
* @map: bpf map
*
* Clean up the security information stored inside bpf map.
*/
void security_bpf_map_free(struct bpf_map *map)
{
call_void_hook(bpf_map_free_security, map);
}
/**
* security_bpf_prog_free() - Free a bpf program's LSM blob
* @aux: bpf program aux info struct
*
* Clean up the security information stored inside bpf prog.
*/
void security_bpf_prog_free(struct bpf_prog_aux *aux)
{
call_void_hook(bpf_prog_free_security, aux);
}
#endif /* CONFIG_BPF_SYSCALL */
/**
* security_locked_down() - Check if a kernel feature is allowed
* @what: requested kernel feature
*
* Determine whether a kernel feature that potentially enables arbitrary code
* execution in kernel space should be permitted.
*
* Return: Returns 0 if permission is granted.
*/
int security_locked_down(enum lockdown_reason what)
{
return call_int_hook(locked_down, 0, what);
}
EXPORT_SYMBOL(security_locked_down);
#ifdef CONFIG_PERF_EVENTS
/**
* security_perf_event_open() - Check if a perf event open is allowed
* @attr: perf event attribute
* @type: type of event
*
* Check whether the @type of perf_event_open syscall is allowed.
*
* Return: Returns 0 if permission is granted.
*/
int security_perf_event_open(struct perf_event_attr *attr, int type)
{
return call_int_hook(perf_event_open, 0, attr, type);
}
/**
* security_perf_event_alloc() - Allocate a perf event LSM blob
* @event: perf event
*
* Allocate and save perf_event security info.
*
* Return: Returns 0 on success, error on failure.
*/
int security_perf_event_alloc(struct perf_event *event)
{
return call_int_hook(perf_event_alloc, 0, event);
}
/**
* security_perf_event_free() - Free a perf event LSM blob
* @event: perf event
*
* Release (free) perf_event security info.
*/
void security_perf_event_free(struct perf_event *event)
{
call_void_hook(perf_event_free, event);
}
/**
* security_perf_event_read() - Check if reading a perf event label is allowed
* @event: perf event
*
* Read perf_event security info if allowed.
*
* Return: Returns 0 if permission is granted.
*/
int security_perf_event_read(struct perf_event *event)
{
return call_int_hook(perf_event_read, 0, event);
}
/**
* security_perf_event_write() - Check if writing a perf event label is allowed
* @event: perf event
*
* Write perf_event security info if allowed.
*
* Return: Returns 0 if permission is granted.
*/
int security_perf_event_write(struct perf_event *event)
{
return call_int_hook(perf_event_write, 0, event);
}
#endif /* CONFIG_PERF_EVENTS */
#ifdef CONFIG_IO_URING
/**
* security_uring_override_creds() - Check if overriding creds is allowed
* @new: new credentials
*
* Check if the current task, executing an io_uring operation, is allowed to
 * override its credentials with @new.
*
* Return: Returns 0 if permission is granted.
*/
int security_uring_override_creds(const struct cred *new)
{
return call_int_hook(uring_override_creds, 0, new);
}
/**
* security_uring_sqpoll() - Check if IORING_SETUP_SQPOLL is allowed
*
 * Check whether the current task is allowed to spawn an io_uring polling thread
* (IORING_SETUP_SQPOLL).
*
* Return: Returns 0 if permission is granted.
*/
int security_uring_sqpoll(void)
{
return call_int_hook(uring_sqpoll, 0);
}
/**
* security_uring_cmd() - Check if a io_uring passthrough command is allowed
* @ioucmd: command
*
* Check whether the file_operations uring_cmd is allowed to run.
*
* Return: Returns 0 if permission is granted.
*/
int security_uring_cmd(struct io_uring_cmd *ioucmd)
{
return call_int_hook(uring_cmd, 0, ioucmd);
}
#endif /* CONFIG_IO_URING */
| linux-master | security/security.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/* Common capabilities, needed by capability.o.
*/
#include <linux/capability.h>
#include <linux/audit.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/lsm_hooks.h>
#include <linux/file.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/pagemap.h>
#include <linux/swap.h>
#include <linux/skbuff.h>
#include <linux/netlink.h>
#include <linux/ptrace.h>
#include <linux/xattr.h>
#include <linux/hugetlb.h>
#include <linux/mount.h>
#include <linux/sched.h>
#include <linux/prctl.h>
#include <linux/securebits.h>
#include <linux/user_namespace.h>
#include <linux/binfmts.h>
#include <linux/personality.h>
#include <linux/mnt_idmapping.h>
/*
* If a non-root user executes a setuid-root binary in
* !secure(SECURE_NOROOT) mode, then we raise capabilities.
* However if fE is also set, then the intent is for only
* the file capabilities to be applied, and the setuid-root
* bit is left on either to change the uid (plausible) or
* to get full privilege on a kernel without file capabilities
* support. So in that case we do not raise capabilities.
*
* Warn if that happens, once per boot.
*/
static void warn_setuid_and_fcaps_mixed(const char *fname)
{
static int warned;
if (!warned) {
printk(KERN_INFO "warning: `%s' has both setuid-root and"
" effective capabilities. Therefore not raising all"
" capabilities.\n", fname);
warned = 1;
}
}
/**
* cap_capable - Determine whether a task has a particular effective capability
* @cred: The credentials to use
* @targ_ns: The user namespace in which we need the capability
* @cap: The capability to check for
* @opts: Bitmask of options defined in include/linux/security.h
*
* Determine whether the nominated task has the specified capability amongst
* its effective set, returning 0 if it does, -ve if it does not.
*
 * NOTE WELL: cap_capable() cannot be used like the kernel's capable() and
 * has_capability() functions. That is, it has the reverse semantics:
 * cap_capable() returns 0 when a task has a capability, but the kernel's
 * capable() and has_capability() return 1 for this case.
*/
int cap_capable(const struct cred *cred, struct user_namespace *targ_ns,
int cap, unsigned int opts)
{
struct user_namespace *ns = targ_ns;
/* See if cred has the capability in the target user namespace
* by examining the target user namespace and all of the target
* user namespace's parents.
*/
for (;;) {
/* Do we have the necessary capabilities? */
if (ns == cred->user_ns)
return cap_raised(cred->cap_effective, cap) ? 0 : -EPERM;
/*
* If we're already at a lower level than we're looking for,
* we're done searching.
*/
if (ns->level <= cred->user_ns->level)
return -EPERM;
/*
* The owner of the user namespace in the parent of the
* user namespace has all caps.
*/
if ((ns->parent == cred->user_ns) && uid_eq(ns->owner, cred->euid))
return 0;
/*
* If you have a capability in a parent user ns, then you have
* it over all children user namespaces as well.
*/
ns = ns->parent;
}
/* We never get here */
}
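/*
 * Minimal usage sketch of the reversed convention noted above
 * (illustrative only, do_privileged_thing() being a stand-in): a caller
 * tests for permission with
 *
 *	if (cap_capable(current_cred(), current_user_ns(),
 *			CAP_NET_ADMIN, CAP_OPT_NONE) == 0)
 *		do_privileged_thing();
 *
 * which corresponds to capable(CAP_NET_ADMIN) evaluating true, except
 * that capable() also consults every other loaded LSM through
 * security_capable().
 */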
/**
* cap_settime - Determine whether the current process may set the system clock
* @ts: The time to set
* @tz: The timezone to set
*
* Determine whether the current process may set the system clock and timezone
* information, returning 0 if permission granted, -ve if denied.
*/
int cap_settime(const struct timespec64 *ts, const struct timezone *tz)
{
if (!capable(CAP_SYS_TIME))
return -EPERM;
return 0;
}
/**
* cap_ptrace_access_check - Determine whether the current process may access
* another
* @child: The process to be accessed
* @mode: The mode of attachment.
*
* If we are in the same or an ancestor user_ns and have all the target
* task's capabilities, then ptrace access is allowed.
* If we have the ptrace capability to the target user_ns, then ptrace
* access is allowed.
* Else denied.
*
* Determine whether a process may access another, returning 0 if permission
* granted, -ve if denied.
*/
int cap_ptrace_access_check(struct task_struct *child, unsigned int mode)
{
int ret = 0;
const struct cred *cred, *child_cred;
const kernel_cap_t *caller_caps;
rcu_read_lock();
cred = current_cred();
child_cred = __task_cred(child);
if (mode & PTRACE_MODE_FSCREDS)
caller_caps = &cred->cap_effective;
else
caller_caps = &cred->cap_permitted;
if (cred->user_ns == child_cred->user_ns &&
cap_issubset(child_cred->cap_permitted, *caller_caps))
goto out;
if (ns_capable(child_cred->user_ns, CAP_SYS_PTRACE))
goto out;
ret = -EPERM;
out:
rcu_read_unlock();
return ret;
}
/**
* cap_ptrace_traceme - Determine whether another process may trace the current
* @parent: The task proposed to be the tracer
*
* If parent is in the same or an ancestor user_ns and has all current's
* capabilities, then ptrace access is allowed.
* If parent has the ptrace capability to current's user_ns, then ptrace
* access is allowed.
* Else denied.
*
* Determine whether the nominated task is permitted to trace the current
* process, returning 0 if permission is granted, -ve if denied.
*/
int cap_ptrace_traceme(struct task_struct *parent)
{
int ret = 0;
const struct cred *cred, *child_cred;
rcu_read_lock();
cred = __task_cred(parent);
child_cred = current_cred();
if (cred->user_ns == child_cred->user_ns &&
cap_issubset(child_cred->cap_permitted, cred->cap_permitted))
goto out;
if (has_ns_capability(parent, child_cred->user_ns, CAP_SYS_PTRACE))
goto out;
ret = -EPERM;
out:
rcu_read_unlock();
return ret;
}
/**
* cap_capget - Retrieve a task's capability sets
* @target: The task from which to retrieve the capability sets
* @effective: The place to record the effective set
* @inheritable: The place to record the inheritable set
* @permitted: The place to record the permitted set
*
* This function retrieves the capabilities of the nominated task and returns
* them to the caller.
*/
int cap_capget(const struct task_struct *target, kernel_cap_t *effective,
kernel_cap_t *inheritable, kernel_cap_t *permitted)
{
const struct cred *cred;
/* Derived from kernel/capability.c:sys_capget. */
rcu_read_lock();
cred = __task_cred(target);
*effective = cred->cap_effective;
*inheritable = cred->cap_inheritable;
*permitted = cred->cap_permitted;
rcu_read_unlock();
return 0;
}
/*
* Determine whether the inheritable capabilities are limited to the old
* permitted set. Returns 1 if they are limited, 0 if they are not.
*/
static inline int cap_inh_is_capped(void)
{
/* they are so limited unless the current task has the CAP_SETPCAP
* capability
*/
if (cap_capable(current_cred(), current_cred()->user_ns,
CAP_SETPCAP, CAP_OPT_NONE) == 0)
return 0;
return 1;
}
/**
* cap_capset - Validate and apply proposed changes to current's capabilities
* @new: The proposed new credentials; alterations should be made here
* @old: The current task's current credentials
* @effective: A pointer to the proposed new effective capabilities set
* @inheritable: A pointer to the proposed new inheritable capabilities set
* @permitted: A pointer to the proposed new permitted capabilities set
*
* This function validates and applies a proposed mass change to the current
* process's capability sets. The changes are made to the proposed new
* credentials, and assuming no error, will be committed by the caller of LSM.
*/
int cap_capset(struct cred *new,
const struct cred *old,
const kernel_cap_t *effective,
const kernel_cap_t *inheritable,
const kernel_cap_t *permitted)
{
if (cap_inh_is_capped() &&
!cap_issubset(*inheritable,
cap_combine(old->cap_inheritable,
old->cap_permitted)))
/* incapable of using this inheritable set */
return -EPERM;
if (!cap_issubset(*inheritable,
cap_combine(old->cap_inheritable,
old->cap_bset)))
/* no new pI capabilities outside bounding set */
return -EPERM;
/* verify restrictions on target's new Permitted set */
if (!cap_issubset(*permitted, old->cap_permitted))
return -EPERM;
/* verify the _new_Effective_ is a subset of the _new_Permitted_ */
if (!cap_issubset(*effective, *permitted))
return -EPERM;
new->cap_effective = *effective;
new->cap_inheritable = *inheritable;
new->cap_permitted = *permitted;
/*
* Mask off ambient bits that are no longer both permitted and
* inheritable.
*/
new->cap_ambient = cap_intersect(new->cap_ambient,
cap_intersect(*permitted,
*inheritable));
if (WARN_ON(!cap_ambient_invariant_ok(new)))
return -EINVAL;
return 0;
}
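/*
 * Userspace sketch (illustrative only, assuming libcap): a capset(2)
 * request that ends up validated by this hook, here dropping CAP_NET_RAW
 * from the caller's effective set while keeping it permitted:
 *
 *	cap_t caps = cap_get_proc();
 *	cap_value_t v = CAP_NET_RAW;
 *
 *	cap_set_flag(caps, CAP_EFFECTIVE, 1, &v, CAP_CLEAR);
 *	if (cap_set_proc(caps))
 *		perror("cap_set_proc");
 *	cap_free(caps);
 *
 * cap_set_proc() fails with EPERM when the proposed sets violate the
 * subset rules enforced above.
 */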
/**
* cap_inode_need_killpriv - Determine if inode change affects privileges
 * @dentry: The inode/dentry being changed, with the change marked ATTR_KILL_PRIV
 *
 * Determine whether a change marked ATTR_KILL_PRIV that is being applied to an
 * inode affects the security markings on that inode and, if so, whether
 * inode_killpriv() should be invoked or the change rejected.
*
* Return: 1 if security.capability has a value, meaning inode_killpriv()
* is required, 0 otherwise, meaning inode_killpriv() is not required.
*/
int cap_inode_need_killpriv(struct dentry *dentry)
{
struct inode *inode = d_backing_inode(dentry);
int error;
error = __vfs_getxattr(dentry, inode, XATTR_NAME_CAPS, NULL, 0);
return error > 0;
}
/**
* cap_inode_killpriv - Erase the security markings on an inode
*
* @idmap: idmap of the mount the inode was found from
* @dentry: The inode/dentry to alter
*
* Erase the privilege-enhancing security markings on an inode.
*
* If the inode has been found through an idmapped mount the idmap of
* the vfsmount must be passed through @idmap. This function will then
* take care to map the inode according to @idmap before checking
* permissions. On non-idmapped mounts or if permission checking is to be
* performed on the raw inode simply pass @nop_mnt_idmap.
*
* Return: 0 if successful, -ve on error.
*/
int cap_inode_killpriv(struct mnt_idmap *idmap, struct dentry *dentry)
{
int error;
error = __vfs_removexattr(idmap, dentry, XATTR_NAME_CAPS);
if (error == -EOPNOTSUPP)
error = 0;
return error;
}
static bool rootid_owns_currentns(vfsuid_t rootvfsuid)
{
struct user_namespace *ns;
kuid_t kroot;
if (!vfsuid_valid(rootvfsuid))
return false;
kroot = vfsuid_into_kuid(rootvfsuid);
for (ns = current_user_ns();; ns = ns->parent) {
if (from_kuid(ns, kroot) == 0)
return true;
if (ns == &init_user_ns)
break;
}
return false;
}
static __u32 sansflags(__u32 m)
{
return m & ~VFS_CAP_FLAGS_EFFECTIVE;
}
static bool is_v2header(int size, const struct vfs_cap_data *cap)
{
if (size != XATTR_CAPS_SZ_2)
return false;
return sansflags(le32_to_cpu(cap->magic_etc)) == VFS_CAP_REVISION_2;
}
static bool is_v3header(int size, const struct vfs_cap_data *cap)
{
if (size != XATTR_CAPS_SZ_3)
return false;
return sansflags(le32_to_cpu(cap->magic_etc)) == VFS_CAP_REVISION_3;
}
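/*
 * For reference, the xattr layouts distinguished by the two helpers above
 * are (sizes as encoded in XATTR_CAPS_SZ_*, all fields __le32):
 *
 *	v1, 12 bytes: magic_etc, permitted[0], inheritable[0]
 *	v2, 20 bytes: magic_etc, permitted[0], inheritable[0],
 *	              permitted[1], inheritable[1]
 *	v3, 24 bytes: the v2 layout followed by rootid, naming the uid that
 *	              counts as "root" for the writer's user namespace
 *
 * The VFS_CAP_FLAGS_EFFECTIVE bit lives alongside the revision in
 * magic_etc, which is why sansflags() masks it off before comparing.
 */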
/*
* getsecurity: We are called for security.* before any attempt to read the
* xattr from the inode itself.
*
* This gives us a chance to read the on-disk value and convert it. If we
* return -EOPNOTSUPP, then vfs_getxattr() will call the i_op handler.
*
* Note we are not called by vfs_getxattr_alloc(), but that is only called
* by the integrity subsystem, which really wants the unconverted values -
* so that's good.
*/
int cap_inode_getsecurity(struct mnt_idmap *idmap,
struct inode *inode, const char *name, void **buffer,
bool alloc)
{
int size;
kuid_t kroot;
vfsuid_t vfsroot;
u32 nsmagic, magic;
uid_t root, mappedroot;
char *tmpbuf = NULL;
struct vfs_cap_data *cap;
struct vfs_ns_cap_data *nscap = NULL;
struct dentry *dentry;
struct user_namespace *fs_ns;
if (strcmp(name, "capability") != 0)
return -EOPNOTSUPP;
dentry = d_find_any_alias(inode);
if (!dentry)
return -EINVAL;
size = vfs_getxattr_alloc(idmap, dentry, XATTR_NAME_CAPS, &tmpbuf,
sizeof(struct vfs_ns_cap_data), GFP_NOFS);
dput(dentry);
/* gcc11 complains if we don't check for !tmpbuf */
if (size < 0 || !tmpbuf)
goto out_free;
fs_ns = inode->i_sb->s_user_ns;
cap = (struct vfs_cap_data *) tmpbuf;
if (is_v2header(size, cap)) {
root = 0;
} else if (is_v3header(size, cap)) {
nscap = (struct vfs_ns_cap_data *) tmpbuf;
root = le32_to_cpu(nscap->rootid);
} else {
size = -EINVAL;
goto out_free;
}
kroot = make_kuid(fs_ns, root);
/* If this is an idmapped mount shift the kuid. */
vfsroot = make_vfsuid(idmap, fs_ns, kroot);
/* If the root kuid maps to a valid uid in current ns, then return
* this as a nscap. */
mappedroot = from_kuid(current_user_ns(), vfsuid_into_kuid(vfsroot));
if (mappedroot != (uid_t)-1 && mappedroot != (uid_t)0) {
size = sizeof(struct vfs_ns_cap_data);
if (alloc) {
if (!nscap) {
/* v2 -> v3 conversion */
nscap = kzalloc(size, GFP_ATOMIC);
if (!nscap) {
size = -ENOMEM;
goto out_free;
}
nsmagic = VFS_CAP_REVISION_3;
magic = le32_to_cpu(cap->magic_etc);
if (magic & VFS_CAP_FLAGS_EFFECTIVE)
nsmagic |= VFS_CAP_FLAGS_EFFECTIVE;
memcpy(&nscap->data, &cap->data, sizeof(__le32) * 2 * VFS_CAP_U32);
nscap->magic_etc = cpu_to_le32(nsmagic);
} else {
/* use allocated v3 buffer */
tmpbuf = NULL;
}
nscap->rootid = cpu_to_le32(mappedroot);
*buffer = nscap;
}
goto out_free;
}
if (!rootid_owns_currentns(vfsroot)) {
size = -EOVERFLOW;
goto out_free;
}
/* This comes from a parent namespace. Return as a v2 capability */
size = sizeof(struct vfs_cap_data);
if (alloc) {
if (nscap) {
/* v3 -> v2 conversion */
cap = kzalloc(size, GFP_ATOMIC);
if (!cap) {
size = -ENOMEM;
goto out_free;
}
magic = VFS_CAP_REVISION_2;
nsmagic = le32_to_cpu(nscap->magic_etc);
if (nsmagic & VFS_CAP_FLAGS_EFFECTIVE)
magic |= VFS_CAP_FLAGS_EFFECTIVE;
memcpy(&cap->data, &nscap->data, sizeof(__le32) * 2 * VFS_CAP_U32);
cap->magic_etc = cpu_to_le32(magic);
} else {
/* use unconverted v2 */
tmpbuf = NULL;
}
*buffer = cap;
}
out_free:
kfree(tmpbuf);
return size;
}
/**
* rootid_from_xattr - translate root uid of vfs caps
*
 * @value: vfs caps value to read the root uid from
 * @size: size of @value
* @task_ns: user namespace of the caller
*/
static vfsuid_t rootid_from_xattr(const void *value, size_t size,
struct user_namespace *task_ns)
{
const struct vfs_ns_cap_data *nscap = value;
uid_t rootid = 0;
if (size == XATTR_CAPS_SZ_3)
rootid = le32_to_cpu(nscap->rootid);
return VFSUIDT_INIT(make_kuid(task_ns, rootid));
}
static bool validheader(size_t size, const struct vfs_cap_data *cap)
{
return is_v2header(size, cap) || is_v3header(size, cap);
}
/**
* cap_convert_nscap - check vfs caps
*
* @idmap: idmap of the mount the inode was found from
* @dentry: used to retrieve inode to check permissions on
* @ivalue: vfs caps value which may be modified by this function
* @size: size of @ivalue
*
* User requested a write of security.capability. If needed, update the
* xattr to change from v2 to v3, or to fixup the v3 rootid.
*
* If the inode has been found through an idmapped mount the idmap of
* the vfsmount must be passed through @idmap. This function will then
* take care to map the inode according to @idmap before checking
* permissions. On non-idmapped mounts or if permission checking is to be
* performed on the raw inode simply pass @nop_mnt_idmap.
*
* Return: On success, return the new size; on error, return < 0.
*/
int cap_convert_nscap(struct mnt_idmap *idmap, struct dentry *dentry,
const void **ivalue, size_t size)
{
struct vfs_ns_cap_data *nscap;
uid_t nsrootid;
const struct vfs_cap_data *cap = *ivalue;
__u32 magic, nsmagic;
struct inode *inode = d_backing_inode(dentry);
struct user_namespace *task_ns = current_user_ns(),
*fs_ns = inode->i_sb->s_user_ns;
kuid_t rootid;
vfsuid_t vfsrootid;
size_t newsize;
if (!*ivalue)
return -EINVAL;
if (!validheader(size, cap))
return -EINVAL;
if (!capable_wrt_inode_uidgid(idmap, inode, CAP_SETFCAP))
return -EPERM;
if (size == XATTR_CAPS_SZ_2 && (idmap == &nop_mnt_idmap))
if (ns_capable(inode->i_sb->s_user_ns, CAP_SETFCAP))
/* user is privileged, just write the v2 */
return size;
vfsrootid = rootid_from_xattr(*ivalue, size, task_ns);
if (!vfsuid_valid(vfsrootid))
return -EINVAL;
rootid = from_vfsuid(idmap, fs_ns, vfsrootid);
if (!uid_valid(rootid))
return -EINVAL;
nsrootid = from_kuid(fs_ns, rootid);
if (nsrootid == -1)
return -EINVAL;
newsize = sizeof(struct vfs_ns_cap_data);
nscap = kmalloc(newsize, GFP_ATOMIC);
if (!nscap)
return -ENOMEM;
nscap->rootid = cpu_to_le32(nsrootid);
nsmagic = VFS_CAP_REVISION_3;
magic = le32_to_cpu(cap->magic_etc);
if (magic & VFS_CAP_FLAGS_EFFECTIVE)
nsmagic |= VFS_CAP_FLAGS_EFFECTIVE;
nscap->magic_etc = cpu_to_le32(nsmagic);
memcpy(&nscap->data, &cap->data, sizeof(__le32) * 2 * VFS_CAP_U32);
*ivalue = nscap;
return newsize;
}
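/*
 * Userspace sketch (illustrative only): this conversion is typically
 * reached through setcap(8), e.g.
 *
 *	setcap cap_net_bind_service+ep ./server
 *
 * which writes a v2 security.capability xattr. Written by a fully
 * privileged task on a plain mount, the v2 payload is stored unchanged
 * (the early return above); written from inside a user namespace, or
 * through an idmapped mount, it is rewritten here into the v3 form with
 * rootid recording which uid is to be treated as "root" for the file.
 */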
/*
* Calculate the new process capability sets from the capability sets attached
* to a file.
*/
static inline int bprm_caps_from_vfs_caps(struct cpu_vfs_cap_data *caps,
struct linux_binprm *bprm,
bool *effective,
bool *has_fcap)
{
struct cred *new = bprm->cred;
int ret = 0;
if (caps->magic_etc & VFS_CAP_FLAGS_EFFECTIVE)
*effective = true;
if (caps->magic_etc & VFS_CAP_REVISION_MASK)
*has_fcap = true;
/*
* pP' = (X & fP) | (pI & fI)
* The addition of pA' is handled later.
*/
new->cap_permitted.val =
(new->cap_bset.val & caps->permitted.val) |
(new->cap_inheritable.val & caps->inheritable.val);
if (caps->permitted.val & ~new->cap_permitted.val)
/* insufficient to execute correctly */
ret = -EPERM;
/*
* For legacy apps, with no internal support for recognizing they
* do not have enough capabilities, we return an error if they are
* missing some "forced" (aka file-permitted) capabilities.
*/
return *effective ? ret : 0;
}
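/*
 * Worked example of the transformation above (illustrative): with a
 * bounding set X that still contains CAP_NET_BIND_SERVICE, a file whose
 * fP also contains CAP_NET_BIND_SERVICE, and empty pI/fI:
 *
 *	pP' = (X & fP) | (pI & fI)
 *	    = {CAP_NET_BIND_SERVICE} | {}
 *	    = {CAP_NET_BIND_SERVICE}
 *
 * Had CAP_NET_BIND_SERVICE been dropped from the bounding set first, pP'
 * would come out empty; with fE set the exec then fails with -EPERM
 * because the file's forced capabilities cannot be honoured in full.
 */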
/**
* get_vfs_caps_from_disk - retrieve vfs caps from disk
*
* @idmap: idmap of the mount the inode was found from
* @dentry: dentry from which @inode is retrieved
* @cpu_caps: vfs capabilities
*
* Extract the on-exec-apply capability sets for an executable file.
*
* If the inode has been found through an idmapped mount the idmap of
* the vfsmount must be passed through @idmap. This function will then
* take care to map the inode according to @idmap before checking
* permissions. On non-idmapped mounts or if permission checking is to be
* performed on the raw inode simply pass @nop_mnt_idmap.
*/
int get_vfs_caps_from_disk(struct mnt_idmap *idmap,
const struct dentry *dentry,
struct cpu_vfs_cap_data *cpu_caps)
{
struct inode *inode = d_backing_inode(dentry);
__u32 magic_etc;
int size;
struct vfs_ns_cap_data data, *nscaps = &data;
struct vfs_cap_data *caps = (struct vfs_cap_data *) &data;
kuid_t rootkuid;
vfsuid_t rootvfsuid;
struct user_namespace *fs_ns;
memset(cpu_caps, 0, sizeof(struct cpu_vfs_cap_data));
if (!inode)
return -ENODATA;
fs_ns = inode->i_sb->s_user_ns;
size = __vfs_getxattr((struct dentry *)dentry, inode,
XATTR_NAME_CAPS, &data, XATTR_CAPS_SZ);
if (size == -ENODATA || size == -EOPNOTSUPP)
/* no data, that's ok */
return -ENODATA;
if (size < 0)
return size;
if (size < sizeof(magic_etc))
return -EINVAL;
cpu_caps->magic_etc = magic_etc = le32_to_cpu(caps->magic_etc);
rootkuid = make_kuid(fs_ns, 0);
switch (magic_etc & VFS_CAP_REVISION_MASK) {
case VFS_CAP_REVISION_1:
if (size != XATTR_CAPS_SZ_1)
return -EINVAL;
break;
case VFS_CAP_REVISION_2:
if (size != XATTR_CAPS_SZ_2)
return -EINVAL;
break;
case VFS_CAP_REVISION_3:
if (size != XATTR_CAPS_SZ_3)
return -EINVAL;
rootkuid = make_kuid(fs_ns, le32_to_cpu(nscaps->rootid));
break;
default:
return -EINVAL;
}
rootvfsuid = make_vfsuid(idmap, fs_ns, rootkuid);
if (!vfsuid_valid(rootvfsuid))
return -ENODATA;
/* Limit the caps to the mounter of the filesystem
* or the more limited uid specified in the xattr.
*/
if (!rootid_owns_currentns(rootvfsuid))
return -ENODATA;
cpu_caps->permitted.val = le32_to_cpu(caps->data[0].permitted);
cpu_caps->inheritable.val = le32_to_cpu(caps->data[0].inheritable);
/*
* Rev1 had just a single 32-bit word, later expanded
* to a second one for the high bits
*/
if ((magic_etc & VFS_CAP_REVISION_MASK) != VFS_CAP_REVISION_1) {
cpu_caps->permitted.val += (u64)le32_to_cpu(caps->data[1].permitted) << 32;
cpu_caps->inheritable.val += (u64)le32_to_cpu(caps->data[1].inheritable) << 32;
}
cpu_caps->permitted.val &= CAP_VALID_MASK;
cpu_caps->inheritable.val &= CAP_VALID_MASK;
cpu_caps->rootid = vfsuid_into_kuid(rootvfsuid);
return 0;
}
/*
* Attempt to get the on-exec apply capability sets for an executable file from
* its xattrs and, if present, apply them to the proposed credentials being
* constructed by execve().
*/
static int get_file_caps(struct linux_binprm *bprm, struct file *file,
bool *effective, bool *has_fcap)
{
int rc = 0;
struct cpu_vfs_cap_data vcaps;
cap_clear(bprm->cred->cap_permitted);
if (!file_caps_enabled)
return 0;
if (!mnt_may_suid(file->f_path.mnt))
return 0;
/*
* This check is redundant with mnt_may_suid() but is kept to make
* explicit that capability bits are limited to s_user_ns and its
* descendants.
*/
if (!current_in_userns(file->f_path.mnt->mnt_sb->s_user_ns))
return 0;
rc = get_vfs_caps_from_disk(file_mnt_idmap(file),
file->f_path.dentry, &vcaps);
if (rc < 0) {
if (rc == -EINVAL)
printk(KERN_NOTICE "Invalid argument reading file caps for %s\n",
bprm->filename);
else if (rc == -ENODATA)
rc = 0;
goto out;
}
rc = bprm_caps_from_vfs_caps(&vcaps, bprm, effective, has_fcap);
out:
if (rc)
cap_clear(bprm->cred->cap_permitted);
return rc;
}
static inline bool root_privileged(void) { return !issecure(SECURE_NOROOT); }
static inline bool __is_real(kuid_t uid, struct cred *cred)
{ return uid_eq(cred->uid, uid); }
static inline bool __is_eff(kuid_t uid, struct cred *cred)
{ return uid_eq(cred->euid, uid); }
static inline bool __is_suid(kuid_t uid, struct cred *cred)
{ return !__is_real(uid, cred) && __is_eff(uid, cred); }
/*
* handle_privileged_root - Handle case of privileged root
* @bprm: The execution parameters, including the proposed creds
* @has_fcap: Are any file capabilities set?
* @effective: Do we have effective root privilege?
 * @root_uid: This namespace's root UID with respect to the initial user namespace
*
* Handle the case where root is privileged and hasn't been neutered by
* SECURE_NOROOT. If file capabilities are set, they won't be combined with
* set UID root and nothing is changed. If we are root, cap_permitted is
* updated. If we have become set UID root, the effective bit is set.
*/
static void handle_privileged_root(struct linux_binprm *bprm, bool has_fcap,
bool *effective, kuid_t root_uid)
{
const struct cred *old = current_cred();
struct cred *new = bprm->cred;
if (!root_privileged())
return;
/*
* If the legacy file capability is set, then don't set privs
* for a setuid root binary run by a non-root user. Do set it
* for a root user just to cause least surprise to an admin.
*/
if (has_fcap && __is_suid(root_uid, new)) {
warn_setuid_and_fcaps_mixed(bprm->filename);
return;
}
/*
* To support inheritance of root-permissions and suid-root
* executables under compatibility mode, we override the
* capability sets for the file.
*/
if (__is_eff(root_uid, new) || __is_real(root_uid, new)) {
/* pP' = (cap_bset & ~0) | (pI & ~0) */
new->cap_permitted = cap_combine(old->cap_bset,
old->cap_inheritable);
}
/*
* If only the real uid is 0, we do not set the effective bit.
*/
if (__is_eff(root_uid, new))
*effective = true;
}
#define __cap_gained(field, target, source) \
!cap_issubset(target->cap_##field, source->cap_##field)
#define __cap_grew(target, source, cred) \
!cap_issubset(cred->cap_##target, cred->cap_##source)
#define __cap_full(field, cred) \
cap_issubset(CAP_FULL_SET, cred->cap_##field)
static inline bool __is_setuid(struct cred *new, const struct cred *old)
{ return !uid_eq(new->euid, old->uid); }
static inline bool __is_setgid(struct cred *new, const struct cred *old)
{ return !gid_eq(new->egid, old->gid); }
/*
* 1) Audit candidate if current->cap_effective is set
*
* We do not bother to audit if 3 things are true:
* 1) cap_effective has all caps
 * 2) we became root *OR* were already root
* 3) root is supposed to have all caps (SECURE_NOROOT)
* Since this is just a normal root execing a process.
*
* Number 1 above might fail if you don't have a full bset, but I think
* that is interesting information to audit.
*
* A number of other conditions require logging:
* 2) something prevented setuid root getting all caps
* 3) non-setuid root gets fcaps
* 4) non-setuid root gets ambient
*/
static inline bool nonroot_raised_pE(struct cred *new, const struct cred *old,
kuid_t root, bool has_fcap)
{
bool ret = false;
if ((__cap_grew(effective, ambient, new) &&
!(__cap_full(effective, new) &&
(__is_eff(root, new) || __is_real(root, new)) &&
root_privileged())) ||
(root_privileged() &&
__is_suid(root, new) &&
!__cap_full(effective, new)) ||
(!__is_setuid(new, old) &&
((has_fcap &&
__cap_gained(permitted, new, old)) ||
__cap_gained(ambient, new, old))))
ret = true;
return ret;
}
/**
* cap_bprm_creds_from_file - Set up the proposed credentials for execve().
* @bprm: The execution parameters, including the proposed creds
* @file: The file to pull the credentials from
*
* Set up the proposed credentials for a new execution context being
* constructed by execve(). The proposed creds in @bprm->cred is altered,
* which won't take effect immediately.
*
* Return: 0 if successful, -ve on error.
*/
int cap_bprm_creds_from_file(struct linux_binprm *bprm, struct file *file)
{
/* Process setpcap binaries and capabilities for uid 0 */
const struct cred *old = current_cred();
struct cred *new = bprm->cred;
bool effective = false, has_fcap = false, is_setid;
int ret;
kuid_t root_uid;
if (WARN_ON(!cap_ambient_invariant_ok(old)))
return -EPERM;
ret = get_file_caps(bprm, file, &effective, &has_fcap);
if (ret < 0)
return ret;
root_uid = make_kuid(new->user_ns, 0);
handle_privileged_root(bprm, has_fcap, &effective, root_uid);
/* if we have fs caps, clear dangerous personality flags */
if (__cap_gained(permitted, new, old))
bprm->per_clear |= PER_CLEAR_ON_SETID;
/* Don't let someone trace a set[ug]id/setpcap binary with the revised
* credentials unless they have the appropriate permit.
*
* In addition, if NO_NEW_PRIVS, then ensure we get no new privs.
*/
is_setid = __is_setuid(new, old) || __is_setgid(new, old);
if ((is_setid || __cap_gained(permitted, new, old)) &&
((bprm->unsafe & ~LSM_UNSAFE_PTRACE) ||
!ptracer_capable(current, new->user_ns))) {
/* downgrade; they get no more than they had, and maybe less */
if (!ns_capable(new->user_ns, CAP_SETUID) ||
(bprm->unsafe & LSM_UNSAFE_NO_NEW_PRIVS)) {
new->euid = new->uid;
new->egid = new->gid;
}
new->cap_permitted = cap_intersect(new->cap_permitted,
old->cap_permitted);
}
new->suid = new->fsuid = new->euid;
new->sgid = new->fsgid = new->egid;
/* File caps or setid cancels ambient. */
if (has_fcap || is_setid)
cap_clear(new->cap_ambient);
/*
* Now that we've computed pA', update pP' to give:
* pP' = (X & fP) | (pI & fI) | pA'
*/
new->cap_permitted = cap_combine(new->cap_permitted, new->cap_ambient);
/*
* Set pE' = (fE ? pP' : pA'). Because pA' is zero if fE is set,
* this is the same as pE' = (fE ? pP' : 0) | pA'.
*/
if (effective)
new->cap_effective = new->cap_permitted;
else
new->cap_effective = new->cap_ambient;
if (WARN_ON(!cap_ambient_invariant_ok(new)))
return -EPERM;
if (nonroot_raised_pE(new, old, root_uid, has_fcap)) {
ret = audit_log_bprm_fcaps(bprm, new, old);
if (ret < 0)
return ret;
}
new->securebits &= ~issecure_mask(SECURE_KEEP_CAPS);
if (WARN_ON(!cap_ambient_invariant_ok(new)))
return -EPERM;
/* Check for privilege-elevated exec. */
if (is_setid ||
(!__is_real(root_uid, new) &&
(effective ||
__cap_grew(permitted, ambient, new))))
bprm->secureexec = 1;
return 0;
}
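/*
 * Worked example of the pA'/pE' rules above (illustrative): a task that
 * has raised CAP_NET_RAW into its ambient set and execs a plain binary
 * (no file caps, not set[ug]id) keeps it across the exec: pA' = pA,
 * pP' picks up pA', and with fE clear pE' = pA'. Give the same binary
 * file capabilities or a set[ug]id bit and the "File caps or setid
 * cancels ambient" rule clears pA', ending the inheritance.
 */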
/**
* cap_inode_setxattr - Determine whether an xattr may be altered
* @dentry: The inode/dentry being altered
* @name: The name of the xattr to be changed
* @value: The value that the xattr will be changed to
* @size: The size of value
* @flags: The replacement flag
*
* Determine whether an xattr may be altered or set on an inode, returning 0 if
* permission is granted, -ve if denied.
*
* This is used to make sure security xattrs don't get updated or set by those
* who aren't privileged to do so.
*/
int cap_inode_setxattr(struct dentry *dentry, const char *name,
const void *value, size_t size, int flags)
{
struct user_namespace *user_ns = dentry->d_sb->s_user_ns;
/* Ignore non-security xattrs */
if (strncmp(name, XATTR_SECURITY_PREFIX,
XATTR_SECURITY_PREFIX_LEN) != 0)
return 0;
/*
* For XATTR_NAME_CAPS the check will be done in
* cap_convert_nscap(), called by setxattr()
*/
if (strcmp(name, XATTR_NAME_CAPS) == 0)
return 0;
if (!ns_capable(user_ns, CAP_SYS_ADMIN))
return -EPERM;
return 0;
}
/**
* cap_inode_removexattr - Determine whether an xattr may be removed
*
* @idmap: idmap of the mount the inode was found from
* @dentry: The inode/dentry being altered
* @name: The name of the xattr to be changed
*
* Determine whether an xattr may be removed from an inode, returning 0 if
* permission is granted, -ve if denied.
*
* If the inode has been found through an idmapped mount the idmap of
* the vfsmount must be passed through @idmap. This function will then
* take care to map the inode according to @idmap before checking
* permissions. On non-idmapped mounts or if permission checking is to be
* performed on the raw inode simply pass @nop_mnt_idmap.
*
* This is used to make sure security xattrs don't get removed by those who
* aren't privileged to remove them.
*/
int cap_inode_removexattr(struct mnt_idmap *idmap,
struct dentry *dentry, const char *name)
{
struct user_namespace *user_ns = dentry->d_sb->s_user_ns;
/* Ignore non-security xattrs */
if (strncmp(name, XATTR_SECURITY_PREFIX,
XATTR_SECURITY_PREFIX_LEN) != 0)
return 0;
if (strcmp(name, XATTR_NAME_CAPS) == 0) {
/* security.capability gets namespaced */
struct inode *inode = d_backing_inode(dentry);
if (!inode)
return -EINVAL;
if (!capable_wrt_inode_uidgid(idmap, inode, CAP_SETFCAP))
return -EPERM;
return 0;
}
if (!ns_capable(user_ns, CAP_SYS_ADMIN))
return -EPERM;
return 0;
}
/*
* cap_emulate_setxuid() fixes the effective / permitted capabilities of
* a process after a call to setuid, setreuid, or setresuid.
*
* 1) When set*uiding _from_ one of {r,e,s}uid == 0 _to_ all of
* {r,e,s}uid != 0, the permitted and effective capabilities are
* cleared.
*
* 2) When set*uiding _from_ euid == 0 _to_ euid != 0, the effective
* capabilities of the process are cleared.
*
* 3) When set*uiding _from_ euid != 0 _to_ euid == 0, the effective
* capabilities are set to the permitted capabilities.
*
* fsuid is handled elsewhere. fsuid == 0 and {r,e,s}uid!= 0 should
* never happen.
*
* -astor
*
* cevans - New behaviour, Oct '99
* A process may, via prctl(), elect to keep its capabilities when it
* calls setuid() and switches away from uid==0. Both permitted and
* effective sets will be retained.
* Without this change, it was impossible for a daemon to drop only some
* of its privilege. The call to setuid(!=0) would drop all privileges!
* Keeping uid 0 is not an option because uid 0 owns too many vital
* files..
* Thanks to Olaf Kirch and Peter Benie for spotting this.
*/
static inline void cap_emulate_setxuid(struct cred *new, const struct cred *old)
{
kuid_t root_uid = make_kuid(old->user_ns, 0);
if ((uid_eq(old->uid, root_uid) ||
uid_eq(old->euid, root_uid) ||
uid_eq(old->suid, root_uid)) &&
(!uid_eq(new->uid, root_uid) &&
!uid_eq(new->euid, root_uid) &&
!uid_eq(new->suid, root_uid))) {
if (!issecure(SECURE_KEEP_CAPS)) {
cap_clear(new->cap_permitted);
cap_clear(new->cap_effective);
}
/*
* Pre-ambient programs expect setresuid to nonroot followed
* by exec to drop capabilities. We should make sure that
* this remains the case.
*/
cap_clear(new->cap_ambient);
}
if (uid_eq(old->euid, root_uid) && !uid_eq(new->euid, root_uid))
cap_clear(new->cap_effective);
if (!uid_eq(old->euid, root_uid) && uid_eq(new->euid, root_uid))
new->cap_effective = new->cap_permitted;
}
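/*
 * Userspace sketch of the "keep caps" escape hatch described above
 * (illustrative only, libcap assumed, error handling omitted): a daemon
 * dropping uid 0 while retaining one capability does roughly
 *
 *	prctl(PR_SET_KEEPCAPS, 1, 0, 0, 0);
 *	setresuid(uid, uid, uid);	 (pP survives, pE is still cleared)
 *
 *	cap_t caps = cap_init();
 *	cap_value_t v = CAP_NET_BIND_SERVICE;
 *	cap_set_flag(caps, CAP_PERMITTED, 1, &v, CAP_SET);
 *	cap_set_flag(caps, CAP_EFFECTIVE, 1, &v, CAP_SET);
 *	cap_set_proc(caps);		 (re-raise pE from the retained pP)
 *	cap_free(caps);
 */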
/**
* cap_task_fix_setuid - Fix up the results of setuid() call
* @new: The proposed credentials
* @old: The current task's current credentials
* @flags: Indications of what has changed
*
* Fix up the results of setuid() call before the credential changes are
* actually applied.
*
* Return: 0 to grant the changes, -ve to deny them.
*/
int cap_task_fix_setuid(struct cred *new, const struct cred *old, int flags)
{
switch (flags) {
case LSM_SETID_RE:
case LSM_SETID_ID:
case LSM_SETID_RES:
/* juggle the capabilities to follow [RES]UID changes unless
* otherwise suppressed */
if (!issecure(SECURE_NO_SETUID_FIXUP))
cap_emulate_setxuid(new, old);
break;
case LSM_SETID_FS:
/* juggle the capabilities to follow FSUID changes, unless
* otherwise suppressed
*
* FIXME - is fsuser used for all CAP_FS_MASK capabilities?
* if not, we might be a bit too harsh here.
*/
if (!issecure(SECURE_NO_SETUID_FIXUP)) {
kuid_t root_uid = make_kuid(old->user_ns, 0);
if (uid_eq(old->fsuid, root_uid) && !uid_eq(new->fsuid, root_uid))
new->cap_effective =
cap_drop_fs_set(new->cap_effective);
if (!uid_eq(old->fsuid, root_uid) && uid_eq(new->fsuid, root_uid))
new->cap_effective =
cap_raise_fs_set(new->cap_effective,
new->cap_permitted);
}
break;
default:
return -EINVAL;
}
return 0;
}
/*
* Rationale: code calling task_setscheduler, task_setioprio, and
* task_setnice, assumes that
* . if capable(cap_sys_nice), then those actions should be allowed
* . if not capable(cap_sys_nice), but acting on your own processes,
* then those actions should be allowed
 * This is insufficient now since you can call code without suid, yet
 * with increased caps.
* So we check for increased caps on the target process.
*/
static int cap_safe_nice(struct task_struct *p)
{
int is_subset, ret = 0;
rcu_read_lock();
is_subset = cap_issubset(__task_cred(p)->cap_permitted,
current_cred()->cap_permitted);
if (!is_subset && !ns_capable(__task_cred(p)->user_ns, CAP_SYS_NICE))
ret = -EPERM;
rcu_read_unlock();
return ret;
}
/**
* cap_task_setscheduler - Determine if scheduler policy change is permitted
* @p: The task to affect
*
* Determine if the requested scheduler policy change is permitted for the
* specified task.
*
* Return: 0 if permission is granted, -ve if denied.
*/
int cap_task_setscheduler(struct task_struct *p)
{
return cap_safe_nice(p);
}
/**
* cap_task_setioprio - Determine if I/O priority change is permitted
* @p: The task to affect
* @ioprio: The I/O priority to set
*
* Determine if the requested I/O priority change is permitted for the specified
* task.
*
* Return: 0 if permission is granted, -ve if denied.
*/
int cap_task_setioprio(struct task_struct *p, int ioprio)
{
return cap_safe_nice(p);
}
/**
* cap_task_setnice - Determine if task priority change is permitted
* @p: The task to affect
* @nice: The nice value to set
*
* Determine if the requested task priority change is permitted for the
* specified task.
*
* Return: 0 if permission is granted, -ve if denied.
*/
int cap_task_setnice(struct task_struct *p, int nice)
{
return cap_safe_nice(p);
}
/*
* Implement PR_CAPBSET_DROP. Attempt to remove the specified capability from
* the current task's bounding set. Returns 0 on success, -ve on error.
*/
static int cap_prctl_drop(unsigned long cap)
{
struct cred *new;
if (!ns_capable(current_user_ns(), CAP_SETPCAP))
return -EPERM;
if (!cap_valid(cap))
return -EINVAL;
new = prepare_creds();
if (!new)
return -ENOMEM;
cap_lower(new->cap_bset, cap);
return commit_creds(new);
}
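/*
 * Illustrative user space sketch (added, not part of this file): a task
 * holding CAP_SETPCAP can remove a capability from its bounding set
 * with, e.g.:
 *
 *	prctl(PR_CAPBSET_DROP, CAP_NET_RAW, 0, 0, 0);
 *
 * The drop is irreversible for this task and its descendants: the
 * bounding set masks file permitted capabilities, so a later execve()
 * of a file carrying CAP_NET_RAW can no longer add it to the new
 * permitted set through that path.
 */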
/**
* cap_task_prctl - Implement process control functions for this security module
* @option: The process control function requested
* @arg2: The argument data for this function
* @arg3: The argument data for this function
* @arg4: The argument data for this function
* @arg5: The argument data for this function
*
* Allow process control functions (sys_prctl()) to alter capabilities; may
* also deny access to other functions not otherwise implemented here.
*
* Return: 0 or +ve on success, -ENOSYS if this function is not implemented
* here, other -ve on error. If -ENOSYS is returned, sys_prctl() and other LSM
* modules will consider performing the function.
*/
int cap_task_prctl(int option, unsigned long arg2, unsigned long arg3,
unsigned long arg4, unsigned long arg5)
{
const struct cred *old = current_cred();
struct cred *new;
switch (option) {
case PR_CAPBSET_READ:
if (!cap_valid(arg2))
return -EINVAL;
return !!cap_raised(old->cap_bset, arg2);
case PR_CAPBSET_DROP:
return cap_prctl_drop(arg2);
/*
* The next four prctl's remain to assist with transitioning a
* system from legacy UID=0 based privilege (when filesystem
* capabilities are not in use) to a system using filesystem
* capabilities only - as the POSIX.1e draft intended.
*
* Note:
*
* PR_SET_SECUREBITS =
* issecure_mask(SECURE_KEEP_CAPS_LOCKED)
* | issecure_mask(SECURE_NOROOT)
* | issecure_mask(SECURE_NOROOT_LOCKED)
* | issecure_mask(SECURE_NO_SETUID_FIXUP)
* | issecure_mask(SECURE_NO_SETUID_FIXUP_LOCKED)
*
* will ensure that the current process and all of its
* children will be locked into a pure
* capability-based-privilege environment.
*/
case PR_SET_SECUREBITS:
if ((((old->securebits & SECURE_ALL_LOCKS) >> 1)
& (old->securebits ^ arg2)) /*[1]*/
|| ((old->securebits & SECURE_ALL_LOCKS & ~arg2)) /*[2]*/
|| (arg2 & ~(SECURE_ALL_LOCKS | SECURE_ALL_BITS)) /*[3]*/
|| (cap_capable(current_cred(),
current_cred()->user_ns,
CAP_SETPCAP,
CAP_OPT_NONE) != 0) /*[4]*/
/*
* [1] no changing of bits that are locked
* [2] no unlocking of locks
* [3] no setting of unsupported bits
* [4] doing anything requires privilege (go read about
* the "sendmail capabilities bug")
*/
)
/* cannot change a locked bit */
return -EPERM;
new = prepare_creds();
if (!new)
return -ENOMEM;
new->securebits = arg2;
return commit_creds(new);
case PR_GET_SECUREBITS:
return old->securebits;
case PR_GET_KEEPCAPS:
return !!issecure(SECURE_KEEP_CAPS);
case PR_SET_KEEPCAPS:
if (arg2 > 1) /* Note, we rely on arg2 being unsigned here */
return -EINVAL;
if (issecure(SECURE_KEEP_CAPS_LOCKED))
return -EPERM;
new = prepare_creds();
if (!new)
return -ENOMEM;
if (arg2)
new->securebits |= issecure_mask(SECURE_KEEP_CAPS);
else
new->securebits &= ~issecure_mask(SECURE_KEEP_CAPS);
return commit_creds(new);
case PR_CAP_AMBIENT:
if (arg2 == PR_CAP_AMBIENT_CLEAR_ALL) {
if (arg3 | arg4 | arg5)
return -EINVAL;
new = prepare_creds();
if (!new)
return -ENOMEM;
cap_clear(new->cap_ambient);
return commit_creds(new);
}
if (((!cap_valid(arg3)) | arg4 | arg5))
return -EINVAL;
if (arg2 == PR_CAP_AMBIENT_IS_SET) {
return !!cap_raised(current_cred()->cap_ambient, arg3);
} else if (arg2 != PR_CAP_AMBIENT_RAISE &&
arg2 != PR_CAP_AMBIENT_LOWER) {
return -EINVAL;
} else {
if (arg2 == PR_CAP_AMBIENT_RAISE &&
(!cap_raised(current_cred()->cap_permitted, arg3) ||
!cap_raised(current_cred()->cap_inheritable,
arg3) ||
issecure(SECURE_NO_CAP_AMBIENT_RAISE)))
return -EPERM;
new = prepare_creds();
if (!new)
return -ENOMEM;
if (arg2 == PR_CAP_AMBIENT_RAISE)
cap_raise(new->cap_ambient, arg3);
else
cap_lower(new->cap_ambient, arg3);
return commit_creds(new);
}
default:
/* No functionality available - continue with default */
return -ENOSYS;
}
}
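/*
 * Illustrative user space sketch (added, not part of this file):
 * raising an ambient capability requires it to already be in both the
 * permitted and inheritable sets, and SECURE_NO_CAP_AMBIENT_RAISE must
 * not be set:
 *
 *	prctl(PR_CAP_AMBIENT, PR_CAP_AMBIENT_RAISE,
 *	      CAP_NET_BIND_SERVICE, 0, 0);
 *
 * The raised capability is then preserved across execve() of binaries
 * that are neither setuid/setgid nor carry file capabilities.
 */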
/**
* cap_vm_enough_memory - Determine whether a new virtual mapping is permitted
* @mm: The VM space in which the new mapping is to be made
* @pages: The size of the mapping
*
* Determine whether the allocation of a new virtual mapping by the current
* task is permitted.
*
* Return: 1 if permission is granted, 0 if not.
*/
int cap_vm_enough_memory(struct mm_struct *mm, long pages)
{
int cap_sys_admin = 0;
if (cap_capable(current_cred(), &init_user_ns,
CAP_SYS_ADMIN, CAP_OPT_NOAUDIT) == 0)
cap_sys_admin = 1;
return cap_sys_admin;
}
/**
* cap_mmap_addr - check if able to map given addr
* @addr: address attempting to be mapped
*
* If the process is attempting to map memory below dac_mmap_min_addr they need
* CAP_SYS_RAWIO. The other parameters to this function are unused by the
* capability security module.
*
* Return: 0 if this mapping should be allowed or -EPERM if not.
*/
int cap_mmap_addr(unsigned long addr)
{
int ret = 0;
if (addr < dac_mmap_min_addr) {
ret = cap_capable(current_cred(), &init_user_ns, CAP_SYS_RAWIO,
CAP_OPT_NONE);
/* set PF_SUPERPRIV if it turns out we allow the low mmap */
if (ret == 0)
current->flags |= PF_SUPERPRIV;
}
return ret;
}
int cap_mmap_file(struct file *file, unsigned long reqprot,
unsigned long prot, unsigned long flags)
{
return 0;
}
#ifdef CONFIG_SECURITY
static struct security_hook_list capability_hooks[] __ro_after_init = {
LSM_HOOK_INIT(capable, cap_capable),
LSM_HOOK_INIT(settime, cap_settime),
LSM_HOOK_INIT(ptrace_access_check, cap_ptrace_access_check),
LSM_HOOK_INIT(ptrace_traceme, cap_ptrace_traceme),
LSM_HOOK_INIT(capget, cap_capget),
LSM_HOOK_INIT(capset, cap_capset),
LSM_HOOK_INIT(bprm_creds_from_file, cap_bprm_creds_from_file),
LSM_HOOK_INIT(inode_need_killpriv, cap_inode_need_killpriv),
LSM_HOOK_INIT(inode_killpriv, cap_inode_killpriv),
LSM_HOOK_INIT(inode_getsecurity, cap_inode_getsecurity),
LSM_HOOK_INIT(mmap_addr, cap_mmap_addr),
LSM_HOOK_INIT(mmap_file, cap_mmap_file),
LSM_HOOK_INIT(task_fix_setuid, cap_task_fix_setuid),
LSM_HOOK_INIT(task_prctl, cap_task_prctl),
LSM_HOOK_INIT(task_setscheduler, cap_task_setscheduler),
LSM_HOOK_INIT(task_setioprio, cap_task_setioprio),
LSM_HOOK_INIT(task_setnice, cap_task_setnice),
LSM_HOOK_INIT(vm_enough_memory, cap_vm_enough_memory),
};
static int __init capability_init(void)
{
security_add_hooks(capability_hooks, ARRAY_SIZE(capability_hooks),
"capability");
return 0;
}
DEFINE_LSM(capability) = {
.name = "capability",
.order = LSM_ORDER_FIRST,
.init = capability_init,
};
#endif /* CONFIG_SECURITY */
| linux-master | security/commoncap.c |
// SPDX-License-Identifier: GPL-2.0
/*
* SafeSetID Linux Security Module
*
* Author: Micah Morton <[email protected]>
*
* Copyright (C) 2018 The Chromium OS Authors.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2, as
* published by the Free Software Foundation.
*
*/
#define pr_fmt(fmt) "SafeSetID: " fmt
#include <linux/lsm_hooks.h>
#include <linux/module.h>
#include <linux/ptrace.h>
#include <linux/sched/task_stack.h>
#include <linux/security.h>
#include "lsm.h"
/* Flag indicating whether initialization completed */
int safesetid_initialized __initdata;
struct setid_ruleset __rcu *safesetid_setuid_rules;
struct setid_ruleset __rcu *safesetid_setgid_rules;
/* Compute a decision for a transition from @src to @dst under @policy. */
enum sid_policy_type _setid_policy_lookup(struct setid_ruleset *policy,
kid_t src, kid_t dst)
{
struct setid_rule *rule;
enum sid_policy_type result = SIDPOL_DEFAULT;
if (policy->type == UID) {
hash_for_each_possible(policy->rules, rule, next, __kuid_val(src.uid)) {
if (!uid_eq(rule->src_id.uid, src.uid))
continue;
if (uid_eq(rule->dst_id.uid, dst.uid))
return SIDPOL_ALLOWED;
result = SIDPOL_CONSTRAINED;
}
} else if (policy->type == GID) {
hash_for_each_possible(policy->rules, rule, next, __kgid_val(src.gid)) {
if (!gid_eq(rule->src_id.gid, src.gid))
continue;
if (gid_eq(rule->dst_id.gid, dst.gid)){
return SIDPOL_ALLOWED;
}
result = SIDPOL_CONSTRAINED;
}
} else {
		/* Should not reach here, report the ID as constrained */
result = SIDPOL_CONSTRAINED;
}
return result;
}
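/*
 * Illustrative example (added, with made-up IDs): given the UID rules
 * 123 -> 456 and 123 -> 789, this lookup returns SIDPOL_ALLOWED for
 * (123, 456) or (123, 789), SIDPOL_CONSTRAINED for (123, 1000) because
 * UID 123 has rules but 1000 is not an allowed destination, and
 * SIDPOL_DEFAULT for any source UID without rules.
 */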
/*
* Compute a decision for a transition from @src to @dst under the active
* policy.
*/
static enum sid_policy_type setid_policy_lookup(kid_t src, kid_t dst, enum setid_type new_type)
{
enum sid_policy_type result = SIDPOL_DEFAULT;
struct setid_ruleset *pol;
rcu_read_lock();
if (new_type == UID)
pol = rcu_dereference(safesetid_setuid_rules);
else if (new_type == GID)
pol = rcu_dereference(safesetid_setgid_rules);
else { /* Should not reach here */
result = SIDPOL_CONSTRAINED;
rcu_read_unlock();
return result;
}
if (pol) {
pol->type = new_type;
result = _setid_policy_lookup(pol, src, dst);
}
rcu_read_unlock();
return result;
}
static int safesetid_security_capable(const struct cred *cred,
struct user_namespace *ns,
int cap,
unsigned int opts)
{
/* We're only interested in CAP_SETUID and CAP_SETGID. */
if (cap != CAP_SETUID && cap != CAP_SETGID)
return 0;
/*
* If CAP_SET{U/G}ID is currently used for a setid or setgroups syscall, we
* want to let it go through here; the real security check happens later, in
* the task_fix_set{u/g}id or task_fix_setgroups hooks.
*/
if ((opts & CAP_OPT_INSETID) != 0)
return 0;
switch (cap) {
case CAP_SETUID:
/*
* If no policy applies to this task, allow the use of CAP_SETUID for
* other purposes.
*/
if (setid_policy_lookup((kid_t){.uid = cred->uid}, INVALID_ID, UID) == SIDPOL_DEFAULT)
return 0;
/*
* Reject use of CAP_SETUID for functionality other than calling
* set*uid() (e.g. setting up userns uid mappings).
*/
pr_warn("Operation requires CAP_SETUID, which is not available to UID %u for operations besides approved set*uid transitions\n",
__kuid_val(cred->uid));
return -EPERM;
case CAP_SETGID:
/*
* If no policy applies to this task, allow the use of CAP_SETGID for
* other purposes.
*/
if (setid_policy_lookup((kid_t){.gid = cred->gid}, INVALID_ID, GID) == SIDPOL_DEFAULT)
return 0;
/*
		 * Reject use of CAP_SETGID for functionality other than calling
		 * set*gid() (e.g. setting up userns gid mappings).
*/
pr_warn("Operation requires CAP_SETGID, which is not available to GID %u for operations besides approved set*gid transitions\n",
__kgid_val(cred->gid));
return -EPERM;
default:
		/* Error, the only capabilities we're checking for are CAP_SETUID/GID */
return 0;
}
return 0;
}
/*
* Check whether a caller with old credentials @old is allowed to switch to
* credentials that contain @new_id.
*/
static bool id_permitted_for_cred(const struct cred *old, kid_t new_id, enum setid_type new_type)
{
bool permitted;
/* If our old creds already had this ID in it, it's fine. */
if (new_type == UID) {
if (uid_eq(new_id.uid, old->uid) || uid_eq(new_id.uid, old->euid) ||
uid_eq(new_id.uid, old->suid))
return true;
} else if (new_type == GID){
if (gid_eq(new_id.gid, old->gid) || gid_eq(new_id.gid, old->egid) ||
gid_eq(new_id.gid, old->sgid))
return true;
} else /* Error, new_type is an invalid type */
return false;
/*
* Transitions to new UIDs require a check against the policy of the old
* RUID.
*/
permitted =
setid_policy_lookup((kid_t){.uid = old->uid}, new_id, new_type) != SIDPOL_CONSTRAINED;
if (!permitted) {
if (new_type == UID) {
pr_warn("UID transition ((%d,%d,%d) -> %d) blocked\n",
__kuid_val(old->uid), __kuid_val(old->euid),
__kuid_val(old->suid), __kuid_val(new_id.uid));
} else if (new_type == GID) {
pr_warn("GID transition ((%d,%d,%d) -> %d) blocked\n",
__kgid_val(old->gid), __kgid_val(old->egid),
__kgid_val(old->sgid), __kgid_val(new_id.gid));
} else /* Error, new_type is an invalid type */
return false;
}
return permitted;
}
/*
* Check whether there is either an exception for user under old cred struct to
* set*uid to user under new cred struct, or the UID transition is allowed (by
* Linux set*uid rules) even without CAP_SETUID.
*/
static int safesetid_task_fix_setuid(struct cred *new,
const struct cred *old,
int flags)
{
/* Do nothing if there are no setuid restrictions for our old RUID. */
if (setid_policy_lookup((kid_t){.uid = old->uid}, INVALID_ID, UID) == SIDPOL_DEFAULT)
return 0;
if (id_permitted_for_cred(old, (kid_t){.uid = new->uid}, UID) &&
id_permitted_for_cred(old, (kid_t){.uid = new->euid}, UID) &&
id_permitted_for_cred(old, (kid_t){.uid = new->suid}, UID) &&
id_permitted_for_cred(old, (kid_t){.uid = new->fsuid}, UID))
return 0;
/*
* Kill this process to avoid potential security vulnerabilities
* that could arise from a missing allowlist entry preventing a
* privileged process from dropping to a lesser-privileged one.
*/
force_sig(SIGKILL);
return -EACCES;
}
static int safesetid_task_fix_setgid(struct cred *new,
const struct cred *old,
int flags)
{
/* Do nothing if there are no setgid restrictions for our old RGID. */
if (setid_policy_lookup((kid_t){.gid = old->gid}, INVALID_ID, GID) == SIDPOL_DEFAULT)
return 0;
if (id_permitted_for_cred(old, (kid_t){.gid = new->gid}, GID) &&
id_permitted_for_cred(old, (kid_t){.gid = new->egid}, GID) &&
id_permitted_for_cred(old, (kid_t){.gid = new->sgid}, GID) &&
id_permitted_for_cred(old, (kid_t){.gid = new->fsgid}, GID))
return 0;
/*
* Kill this process to avoid potential security vulnerabilities
* that could arise from a missing allowlist entry preventing a
* privileged process from dropping to a lesser-privileged one.
*/
force_sig(SIGKILL);
return -EACCES;
}
static int safesetid_task_fix_setgroups(struct cred *new, const struct cred *old)
{
int i;
/* Do nothing if there are no setgid restrictions for our old RGID. */
if (setid_policy_lookup((kid_t){.gid = old->gid}, INVALID_ID, GID) == SIDPOL_DEFAULT)
return 0;
get_group_info(new->group_info);
for (i = 0; i < new->group_info->ngroups; i++) {
if (!id_permitted_for_cred(old, (kid_t){.gid = new->group_info->gid[i]}, GID)) {
put_group_info(new->group_info);
/*
* Kill this process to avoid potential security vulnerabilities
* that could arise from a missing allowlist entry preventing a
* privileged process from dropping to a lesser-privileged one.
*/
force_sig(SIGKILL);
return -EACCES;
}
}
put_group_info(new->group_info);
return 0;
}
static struct security_hook_list safesetid_security_hooks[] = {
LSM_HOOK_INIT(task_fix_setuid, safesetid_task_fix_setuid),
LSM_HOOK_INIT(task_fix_setgid, safesetid_task_fix_setgid),
LSM_HOOK_INIT(task_fix_setgroups, safesetid_task_fix_setgroups),
LSM_HOOK_INIT(capable, safesetid_security_capable)
};
static int __init safesetid_security_init(void)
{
security_add_hooks(safesetid_security_hooks,
ARRAY_SIZE(safesetid_security_hooks), "safesetid");
/* Report that SafeSetID successfully initialized */
safesetid_initialized = 1;
return 0;
}
DEFINE_LSM(safesetid_security_init) = {
.init = safesetid_security_init,
.name = "safesetid",
};
| linux-master | security/safesetid/lsm.c |
// SPDX-License-Identifier: GPL-2.0
/*
* SafeSetID Linux Security Module
*
* Author: Micah Morton <[email protected]>
*
* Copyright (C) 2018 The Chromium OS Authors.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2, as
* published by the Free Software Foundation.
*
*/
#define pr_fmt(fmt) "SafeSetID: " fmt
#include <linux/security.h>
#include <linux/cred.h>
#include "lsm.h"
static DEFINE_MUTEX(uid_policy_update_lock);
static DEFINE_MUTEX(gid_policy_update_lock);
/*
 * If the input buffer contains one or more invalid IDs, the kid_t fields of
 * @rule may still get updated, but this function will return an error.
* Contents of @buf may be modified.
*/
static int parse_policy_line(struct file *file, char *buf,
struct setid_rule *rule)
{
char *child_str;
int ret;
u32 parsed_parent, parsed_child;
/* Format of |buf| string should be <UID>:<UID> or <GID>:<GID> */
child_str = strchr(buf, ':');
if (child_str == NULL)
return -EINVAL;
*child_str = '\0';
child_str++;
ret = kstrtou32(buf, 0, &parsed_parent);
if (ret)
return ret;
ret = kstrtou32(child_str, 0, &parsed_child);
if (ret)
return ret;
if (rule->type == UID){
rule->src_id.uid = make_kuid(file->f_cred->user_ns, parsed_parent);
rule->dst_id.uid = make_kuid(file->f_cred->user_ns, parsed_child);
if (!uid_valid(rule->src_id.uid) || !uid_valid(rule->dst_id.uid))
return -EINVAL;
} else if (rule->type == GID){
rule->src_id.gid = make_kgid(file->f_cred->user_ns, parsed_parent);
rule->dst_id.gid = make_kgid(file->f_cred->user_ns, parsed_child);
if (!gid_valid(rule->src_id.gid) || !gid_valid(rule->dst_id.gid))
return -EINVAL;
} else {
/* Error, rule->type is an invalid type */
return -EINVAL;
}
return 0;
}
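/*
 * Example of the expected input (added for clarity, IDs are made up):
 *
 *	"123:456\n123:789\n"
 *
 * declares that (UID or GID) 123 may transition to 456 or 789.  Each
 * line must be of the form <id>:<id> and end with a newline, including
 * the last one.
 */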
static void __release_ruleset(struct rcu_head *rcu)
{
struct setid_ruleset *pol =
container_of(rcu, struct setid_ruleset, rcu);
int bucket;
struct setid_rule *rule;
struct hlist_node *tmp;
hash_for_each_safe(pol->rules, bucket, tmp, rule, next)
kfree(rule);
kfree(pol->policy_str);
kfree(pol);
}
static void release_ruleset(struct setid_ruleset *pol)
{
	call_rcu(&pol->rcu, __release_ruleset);
}
static void insert_rule(struct setid_ruleset *pol, struct setid_rule *rule)
{
if (pol->type == UID)
hash_add(pol->rules, &rule->next, __kuid_val(rule->src_id.uid));
else if (pol->type == GID)
hash_add(pol->rules, &rule->next, __kgid_val(rule->src_id.gid));
	else /* Error, pol->type is neither UID nor GID */
return;
}
static int verify_ruleset(struct setid_ruleset *pol)
{
int bucket;
struct setid_rule *rule, *nrule;
int res = 0;
hash_for_each(pol->rules, bucket, rule, next) {
if (_setid_policy_lookup(pol, rule->dst_id, INVALID_ID) == SIDPOL_DEFAULT) {
if (pol->type == UID) {
pr_warn("insecure policy detected: uid %d is constrained but transitively unconstrained through uid %d\n",
__kuid_val(rule->src_id.uid),
__kuid_val(rule->dst_id.uid));
} else if (pol->type == GID) {
pr_warn("insecure policy detected: gid %d is constrained but transitively unconstrained through gid %d\n",
__kgid_val(rule->src_id.gid),
__kgid_val(rule->dst_id.gid));
} else { /* pol->type is an invalid type */
res = -EINVAL;
return res;
}
res = -EINVAL;
/* fix it up */
nrule = kmalloc(sizeof(struct setid_rule), GFP_KERNEL);
if (!nrule)
return -ENOMEM;
if (pol->type == UID){
nrule->src_id.uid = rule->dst_id.uid;
nrule->dst_id.uid = rule->dst_id.uid;
nrule->type = UID;
} else { /* pol->type must be GID if we've made it to here */
nrule->src_id.gid = rule->dst_id.gid;
nrule->dst_id.gid = rule->dst_id.gid;
nrule->type = GID;
}
insert_rule(pol, nrule);
}
}
return res;
}
static ssize_t handle_policy_update(struct file *file,
const char __user *ubuf, size_t len, enum setid_type policy_type)
{
struct setid_ruleset *pol;
char *buf, *p, *end;
int err;
pol = kmalloc(sizeof(struct setid_ruleset), GFP_KERNEL);
if (!pol)
return -ENOMEM;
pol->policy_str = NULL;
pol->type = policy_type;
hash_init(pol->rules);
p = buf = memdup_user_nul(ubuf, len);
if (IS_ERR(buf)) {
err = PTR_ERR(buf);
goto out_free_pol;
}
pol->policy_str = kstrdup(buf, GFP_KERNEL);
if (pol->policy_str == NULL) {
err = -ENOMEM;
goto out_free_buf;
}
/* policy lines, including the last one, end with \n */
while (*p != '\0') {
struct setid_rule *rule;
end = strchr(p, '\n');
if (end == NULL) {
err = -EINVAL;
goto out_free_buf;
}
*end = '\0';
rule = kmalloc(sizeof(struct setid_rule), GFP_KERNEL);
if (!rule) {
err = -ENOMEM;
goto out_free_buf;
}
rule->type = policy_type;
err = parse_policy_line(file, p, rule);
if (err)
goto out_free_rule;
if (_setid_policy_lookup(pol, rule->src_id, rule->dst_id) == SIDPOL_ALLOWED) {
pr_warn("bad policy: duplicate entry\n");
err = -EEXIST;
goto out_free_rule;
}
insert_rule(pol, rule);
p = end + 1;
continue;
out_free_rule:
kfree(rule);
goto out_free_buf;
}
err = verify_ruleset(pol);
/* bogus policy falls through after fixing it up */
if (err && err != -EINVAL)
goto out_free_buf;
/*
* Everything looks good, apply the policy and release the old one.
* What we really want here is an xchg() wrapper for RCU, but since that
* doesn't currently exist, just use a spinlock for now.
*/
if (policy_type == UID) {
mutex_lock(&uid_policy_update_lock);
pol = rcu_replace_pointer(safesetid_setuid_rules, pol,
lockdep_is_held(&uid_policy_update_lock));
mutex_unlock(&uid_policy_update_lock);
} else if (policy_type == GID) {
mutex_lock(&gid_policy_update_lock);
pol = rcu_replace_pointer(safesetid_setgid_rules, pol,
lockdep_is_held(&gid_policy_update_lock));
mutex_unlock(&gid_policy_update_lock);
} else {
		/* Error, policy type is neither UID nor GID */
		pr_warn("error: bad policy type\n");
}
err = len;
out_free_buf:
kfree(buf);
out_free_pol:
if (pol)
release_ruleset(pol);
return err;
}
static ssize_t safesetid_uid_file_write(struct file *file,
const char __user *buf,
size_t len,
loff_t *ppos)
{
if (!file_ns_capable(file, &init_user_ns, CAP_MAC_ADMIN))
return -EPERM;
if (*ppos != 0)
return -EINVAL;
return handle_policy_update(file, buf, len, UID);
}
static ssize_t safesetid_gid_file_write(struct file *file,
const char __user *buf,
size_t len,
loff_t *ppos)
{
if (!file_ns_capable(file, &init_user_ns, CAP_MAC_ADMIN))
return -EPERM;
if (*ppos != 0)
return -EINVAL;
return handle_policy_update(file, buf, len, GID);
}
static ssize_t safesetid_file_read(struct file *file, char __user *buf,
size_t len, loff_t *ppos, struct mutex *policy_update_lock, struct __rcu setid_ruleset* ruleset)
{
ssize_t res = 0;
struct setid_ruleset *pol;
const char *kbuf;
mutex_lock(policy_update_lock);
pol = rcu_dereference_protected(ruleset, lockdep_is_held(policy_update_lock));
if (pol) {
kbuf = pol->policy_str;
res = simple_read_from_buffer(buf, len, ppos,
kbuf, strlen(kbuf));
}
mutex_unlock(policy_update_lock);
return res;
}
static ssize_t safesetid_uid_file_read(struct file *file, char __user *buf,
size_t len, loff_t *ppos)
{
return safesetid_file_read(file, buf, len, ppos,
&uid_policy_update_lock, safesetid_setuid_rules);
}
static ssize_t safesetid_gid_file_read(struct file *file, char __user *buf,
size_t len, loff_t *ppos)
{
return safesetid_file_read(file, buf, len, ppos,
&gid_policy_update_lock, safesetid_setgid_rules);
}
static const struct file_operations safesetid_uid_file_fops = {
.read = safesetid_uid_file_read,
.write = safesetid_uid_file_write,
};
static const struct file_operations safesetid_gid_file_fops = {
.read = safesetid_gid_file_read,
.write = safesetid_gid_file_write,
};
static int __init safesetid_init_securityfs(void)
{
int ret;
struct dentry *policy_dir;
struct dentry *uid_policy_file;
struct dentry *gid_policy_file;
if (!safesetid_initialized)
return 0;
policy_dir = securityfs_create_dir("safesetid", NULL);
if (IS_ERR(policy_dir)) {
ret = PTR_ERR(policy_dir);
goto error;
}
uid_policy_file = securityfs_create_file("uid_allowlist_policy", 0600,
policy_dir, NULL, &safesetid_uid_file_fops);
if (IS_ERR(uid_policy_file)) {
ret = PTR_ERR(uid_policy_file);
goto error;
}
gid_policy_file = securityfs_create_file("gid_allowlist_policy", 0600,
policy_dir, NULL, &safesetid_gid_file_fops);
if (IS_ERR(gid_policy_file)) {
ret = PTR_ERR(gid_policy_file);
goto error;
}
return 0;
error:
securityfs_remove(policy_dir);
return ret;
}
fs_initcall(safesetid_init_securityfs);
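/*
 * Illustrative usage (added; assumes securityfs is mounted at the usual
 * /sys/kernel/security):
 *
 *	echo "123:456" > /sys/kernel/security/safesetid/uid_allowlist_policy
 *
 * Writing requires CAP_MAC_ADMIN and a zero file offset; reading the
 * file back returns the currently applied policy string.
 */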
| linux-master | security/safesetid/securityfs.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Landlock LSM - Object management
*
* Copyright © 2016-2020 Mickaël Salaün <[email protected]>
* Copyright © 2018-2020 ANSSI
*/
#include <linux/bug.h>
#include <linux/compiler_types.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/rcupdate.h>
#include <linux/refcount.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include "object.h"
struct landlock_object *
landlock_create_object(const struct landlock_object_underops *const underops,
void *const underobj)
{
struct landlock_object *new_object;
if (WARN_ON_ONCE(!underops || !underobj))
return ERR_PTR(-ENOENT);
new_object = kzalloc(sizeof(*new_object), GFP_KERNEL_ACCOUNT);
if (!new_object)
return ERR_PTR(-ENOMEM);
refcount_set(&new_object->usage, 1);
spin_lock_init(&new_object->lock);
new_object->underops = underops;
new_object->underobj = underobj;
return new_object;
}
/*
* The caller must own the object (i.e. thanks to object->usage) to safely put
* it.
*/
void landlock_put_object(struct landlock_object *const object)
{
/*
* The call to @object->underops->release(object) might sleep, e.g.
* because of iput().
*/
might_sleep();
if (!object)
return;
/*
* If the @object's refcount cannot drop to zero, we can just decrement
* the refcount without holding a lock. Otherwise, the decrement must
* happen under @object->lock for synchronization with things like
* get_inode_object().
*/
if (refcount_dec_and_lock(&object->usage, &object->lock)) {
__acquire(&object->lock);
/*
* With @object->lock initially held, remove the reference from
* @object->underobj to @object (if it still exists).
*/
object->underops->release(object);
kfree_rcu(object, rcu_free);
}
}
| linux-master | security/landlock/object.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Landlock LSM - Ptrace hooks
*
* Copyright © 2017-2020 Mickaël Salaün <[email protected]>
* Copyright © 2019-2020 ANSSI
*/
#include <asm/current.h>
#include <linux/cred.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/lsm_hooks.h>
#include <linux/rcupdate.h>
#include <linux/sched.h>
#include "common.h"
#include "cred.h"
#include "ptrace.h"
#include "ruleset.h"
#include "setup.h"
/**
* domain_scope_le - Checks domain ordering for scoped ptrace
*
* @parent: Parent domain.
* @child: Potential child of @parent.
*
* Checks if the @parent domain is less or equal to (i.e. an ancestor, which
* means a subset of) the @child domain.
*/
static bool domain_scope_le(const struct landlock_ruleset *const parent,
const struct landlock_ruleset *const child)
{
const struct landlock_hierarchy *walker;
if (!parent)
return true;
if (!child)
return false;
for (walker = child->hierarchy; walker; walker = walker->parent) {
if (walker == parent->hierarchy)
/* @parent is in the scoped hierarchy of @child. */
return true;
}
/* There is no relationship between @parent and @child. */
return false;
}
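/*
 * Example (added for clarity): if a task enforces ruleset A and its
 * child then enforces ruleset B on top of it, the child's hierarchy is
 * A -> B.  domain_scope_le(A, A->B) is true, so a tracer restricted by
 * A may still trace the child, whereas domain_scope_le(A->B, A) is
 * false and the more-restricted child cannot trace its parent.
 */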
static bool task_is_scoped(const struct task_struct *const parent,
const struct task_struct *const child)
{
bool is_scoped;
const struct landlock_ruleset *dom_parent, *dom_child;
rcu_read_lock();
dom_parent = landlock_get_task_domain(parent);
dom_child = landlock_get_task_domain(child);
is_scoped = domain_scope_le(dom_parent, dom_child);
rcu_read_unlock();
return is_scoped;
}
static int task_ptrace(const struct task_struct *const parent,
const struct task_struct *const child)
{
/* Quick return for non-landlocked tasks. */
if (!landlocked(parent))
return 0;
if (task_is_scoped(parent, child))
return 0;
return -EPERM;
}
/**
* hook_ptrace_access_check - Determines whether the current process may access
* another
*
* @child: Process to be accessed.
* @mode: Mode of attachment.
*
* If the current task has Landlock rules, then the child must have at least
* the same rules. Else denied.
*
* Determines whether a process may access another, returning 0 if permission
* granted, -errno if denied.
*/
static int hook_ptrace_access_check(struct task_struct *const child,
const unsigned int mode)
{
return task_ptrace(current, child);
}
/**
* hook_ptrace_traceme - Determines whether another process may trace the
* current one
*
* @parent: Task proposed to be the tracer.
*
* If the parent has Landlock rules, then the current task must have the same
* or more rules. Else denied.
*
* Determines whether the nominated task is permitted to trace the current
* process, returning 0 if permission is granted, -errno if denied.
*/
static int hook_ptrace_traceme(struct task_struct *const parent)
{
return task_ptrace(parent, current);
}
static struct security_hook_list landlock_hooks[] __ro_after_init = {
LSM_HOOK_INIT(ptrace_access_check, hook_ptrace_access_check),
LSM_HOOK_INIT(ptrace_traceme, hook_ptrace_traceme),
};
__init void landlock_add_ptrace_hooks(void)
{
security_add_hooks(landlock_hooks, ARRAY_SIZE(landlock_hooks),
LANDLOCK_NAME);
}
| linux-master | security/landlock/ptrace.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Landlock LSM - System call implementations and user space interfaces
*
* Copyright © 2016-2020 Mickaël Salaün <[email protected]>
* Copyright © 2018-2020 ANSSI
*/
#include <asm/current.h>
#include <linux/anon_inodes.h>
#include <linux/build_bug.h>
#include <linux/capability.h>
#include <linux/compiler_types.h>
#include <linux/dcache.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/limits.h>
#include <linux/mount.h>
#include <linux/path.h>
#include <linux/sched.h>
#include <linux/security.h>
#include <linux/stddef.h>
#include <linux/syscalls.h>
#include <linux/types.h>
#include <linux/uaccess.h>
#include <uapi/linux/landlock.h>
#include "cred.h"
#include "fs.h"
#include "limits.h"
#include "ruleset.h"
#include "setup.h"
/**
* copy_min_struct_from_user - Safe future-proof argument copying
*
* Extend copy_struct_from_user() to check for consistent user buffer.
*
* @dst: Kernel space pointer or NULL.
* @ksize: Actual size of the data pointed to by @dst.
* @ksize_min: Minimal required size to be copied.
* @src: User space pointer or NULL.
* @usize: (Alleged) size of the data pointed to by @src.
*/
static __always_inline int
copy_min_struct_from_user(void *const dst, const size_t ksize,
const size_t ksize_min, const void __user *const src,
const size_t usize)
{
/* Checks buffer inconsistencies. */
BUILD_BUG_ON(!dst);
if (!src)
return -EFAULT;
/* Checks size ranges. */
BUILD_BUG_ON(ksize <= 0);
BUILD_BUG_ON(ksize < ksize_min);
if (usize < ksize_min)
return -EINVAL;
if (usize > PAGE_SIZE)
return -E2BIG;
/* Copies user buffer and fills with zeros. */
return copy_struct_from_user(dst, ksize, src, usize);
}
/*
* This function only contains arithmetic operations with constants, leading to
* BUILD_BUG_ON(). The related code is evaluated and checked at build time,
* but it is then ignored thanks to compiler optimizations.
*/
static void build_check_abi(void)
{
struct landlock_ruleset_attr ruleset_attr;
struct landlock_path_beneath_attr path_beneath_attr;
size_t ruleset_size, path_beneath_size;
/*
* For each user space ABI structures, first checks that there is no
* hole in them, then checks that all architectures have the same
* struct size.
*/
ruleset_size = sizeof(ruleset_attr.handled_access_fs);
BUILD_BUG_ON(sizeof(ruleset_attr) != ruleset_size);
BUILD_BUG_ON(sizeof(ruleset_attr) != 8);
path_beneath_size = sizeof(path_beneath_attr.allowed_access);
path_beneath_size += sizeof(path_beneath_attr.parent_fd);
BUILD_BUG_ON(sizeof(path_beneath_attr) != path_beneath_size);
BUILD_BUG_ON(sizeof(path_beneath_attr) != 12);
}
/* Ruleset handling */
static int fop_ruleset_release(struct inode *const inode,
struct file *const filp)
{
struct landlock_ruleset *ruleset = filp->private_data;
landlock_put_ruleset(ruleset);
return 0;
}
static ssize_t fop_dummy_read(struct file *const filp, char __user *const buf,
const size_t size, loff_t *const ppos)
{
/* Dummy handler to enable FMODE_CAN_READ. */
return -EINVAL;
}
static ssize_t fop_dummy_write(struct file *const filp,
const char __user *const buf, const size_t size,
loff_t *const ppos)
{
/* Dummy handler to enable FMODE_CAN_WRITE. */
return -EINVAL;
}
/*
 * A ruleset file descriptor makes it possible to build a ruleset by adding
 * (i.e. writing) rule after rule, without relying on the task's context. This
 * reentrant design is also used in a read way to enforce the ruleset on the
 * current task.
*/
static const struct file_operations ruleset_fops = {
.release = fop_ruleset_release,
.read = fop_dummy_read,
.write = fop_dummy_write,
};
#define LANDLOCK_ABI_VERSION 3
/**
* sys_landlock_create_ruleset - Create a new ruleset
*
* @attr: Pointer to a &struct landlock_ruleset_attr identifying the scope of
* the new ruleset.
* @size: Size of the pointed &struct landlock_ruleset_attr (needed for
* backward and forward compatibility).
* @flags: Supported value: %LANDLOCK_CREATE_RULESET_VERSION.
*
 * This system call creates a new Landlock ruleset, and returns the
 * related file descriptor on success.
*
* If @flags is %LANDLOCK_CREATE_RULESET_VERSION and @attr is NULL and @size is
* 0, then the returned value is the highest supported Landlock ABI version
* (starting at 1).
*
* Possible returned errors are:
*
* - %EOPNOTSUPP: Landlock is supported by the kernel but disabled at boot time;
* - %EINVAL: unknown @flags, or unknown access, or too small @size;
* - %E2BIG or %EFAULT: @attr or @size inconsistencies;
* - %ENOMSG: empty &landlock_ruleset_attr.handled_access_fs.
*/
SYSCALL_DEFINE3(landlock_create_ruleset,
const struct landlock_ruleset_attr __user *const, attr,
const size_t, size, const __u32, flags)
{
struct landlock_ruleset_attr ruleset_attr;
struct landlock_ruleset *ruleset;
int err, ruleset_fd;
/* Build-time checks. */
build_check_abi();
if (!landlock_initialized)
return -EOPNOTSUPP;
if (flags) {
if ((flags == LANDLOCK_CREATE_RULESET_VERSION) && !attr &&
!size)
return LANDLOCK_ABI_VERSION;
return -EINVAL;
}
/* Copies raw user space buffer. */
err = copy_min_struct_from_user(&ruleset_attr, sizeof(ruleset_attr),
offsetofend(typeof(ruleset_attr),
handled_access_fs),
attr, size);
if (err)
return err;
/* Checks content (and 32-bits cast). */
if ((ruleset_attr.handled_access_fs | LANDLOCK_MASK_ACCESS_FS) !=
LANDLOCK_MASK_ACCESS_FS)
return -EINVAL;
/* Checks arguments and transforms to kernel struct. */
ruleset = landlock_create_ruleset(ruleset_attr.handled_access_fs);
if (IS_ERR(ruleset))
return PTR_ERR(ruleset);
/* Creates anonymous FD referring to the ruleset. */
ruleset_fd = anon_inode_getfd("[landlock-ruleset]", &ruleset_fops,
ruleset, O_RDWR | O_CLOEXEC);
if (ruleset_fd < 0)
landlock_put_ruleset(ruleset);
return ruleset_fd;
}
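/*
 * Illustrative user space sketch (added, not part of the kernel): probe
 * the ABI version, then create a ruleset handling a few filesystem
 * accesses:
 *
 *	int abi = syscall(__NR_landlock_create_ruleset, NULL, 0,
 *			  LANDLOCK_CREATE_RULESET_VERSION);
 *	struct landlock_ruleset_attr attr = {
 *		.handled_access_fs = LANDLOCK_ACCESS_FS_EXECUTE |
 *				     LANDLOCK_ACCESS_FS_WRITE_FILE,
 *	};
 *	int ruleset_fd = syscall(__NR_landlock_create_ruleset, &attr,
 *				 sizeof(attr), 0);
 */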
/*
* Returns an owned ruleset from a FD. It is thus needed to call
* landlock_put_ruleset() on the return value.
*/
static struct landlock_ruleset *get_ruleset_from_fd(const int fd,
const fmode_t mode)
{
struct fd ruleset_f;
struct landlock_ruleset *ruleset;
ruleset_f = fdget(fd);
if (!ruleset_f.file)
return ERR_PTR(-EBADF);
/* Checks FD type and access right. */
if (ruleset_f.file->f_op != &ruleset_fops) {
ruleset = ERR_PTR(-EBADFD);
goto out_fdput;
}
if (!(ruleset_f.file->f_mode & mode)) {
ruleset = ERR_PTR(-EPERM);
goto out_fdput;
}
ruleset = ruleset_f.file->private_data;
if (WARN_ON_ONCE(ruleset->num_layers != 1)) {
ruleset = ERR_PTR(-EINVAL);
goto out_fdput;
}
landlock_get_ruleset(ruleset);
out_fdput:
fdput(ruleset_f);
return ruleset;
}
/* Path handling */
/*
 * @path: Must call path_put(@path) after the call if it succeeded.
*/
static int get_path_from_fd(const s32 fd, struct path *const path)
{
struct fd f;
int err = 0;
BUILD_BUG_ON(!__same_type(
fd, ((struct landlock_path_beneath_attr *)NULL)->parent_fd));
/* Handles O_PATH. */
f = fdget_raw(fd);
if (!f.file)
return -EBADF;
/*
* Forbids ruleset FDs, internal filesystems (e.g. nsfs), including
* pseudo filesystems that will never be mountable (e.g. sockfs,
* pipefs).
*/
if ((f.file->f_op == &ruleset_fops) ||
(f.file->f_path.mnt->mnt_flags & MNT_INTERNAL) ||
(f.file->f_path.dentry->d_sb->s_flags & SB_NOUSER) ||
d_is_negative(f.file->f_path.dentry) ||
IS_PRIVATE(d_backing_inode(f.file->f_path.dentry))) {
err = -EBADFD;
goto out_fdput;
}
*path = f.file->f_path;
path_get(path);
out_fdput:
fdput(f);
return err;
}
/**
* sys_landlock_add_rule - Add a new rule to a ruleset
*
* @ruleset_fd: File descriptor tied to the ruleset that should be extended
* with the new rule.
* @rule_type: Identify the structure type pointed to by @rule_attr (only
* %LANDLOCK_RULE_PATH_BENEATH for now).
* @rule_attr: Pointer to a rule (only of type &struct
* landlock_path_beneath_attr for now).
* @flags: Must be 0.
*
 * This system call defines a new rule and adds it to an existing
 * ruleset.
*
* Possible returned errors are:
*
* - %EOPNOTSUPP: Landlock is supported by the kernel but disabled at boot time;
* - %EINVAL: @flags is not 0, or inconsistent access in the rule (i.e.
* &landlock_path_beneath_attr.allowed_access is not a subset of the
* ruleset handled accesses);
* - %ENOMSG: Empty accesses (e.g. &landlock_path_beneath_attr.allowed_access);
* - %EBADF: @ruleset_fd is not a file descriptor for the current thread, or a
* member of @rule_attr is not a file descriptor as expected;
* - %EBADFD: @ruleset_fd is not a ruleset file descriptor, or a member of
* @rule_attr is not the expected file descriptor type;
* - %EPERM: @ruleset_fd has no write access to the underlying ruleset;
* - %EFAULT: @rule_attr inconsistency.
*/
SYSCALL_DEFINE4(landlock_add_rule, const int, ruleset_fd,
const enum landlock_rule_type, rule_type,
const void __user *const, rule_attr, const __u32, flags)
{
struct landlock_path_beneath_attr path_beneath_attr;
struct path path;
struct landlock_ruleset *ruleset;
int res, err;
if (!landlock_initialized)
return -EOPNOTSUPP;
/* No flag for now. */
if (flags)
return -EINVAL;
/* Gets and checks the ruleset. */
ruleset = get_ruleset_from_fd(ruleset_fd, FMODE_CAN_WRITE);
if (IS_ERR(ruleset))
return PTR_ERR(ruleset);
if (rule_type != LANDLOCK_RULE_PATH_BENEATH) {
err = -EINVAL;
goto out_put_ruleset;
}
/* Copies raw user space buffer, only one type for now. */
res = copy_from_user(&path_beneath_attr, rule_attr,
sizeof(path_beneath_attr));
if (res) {
err = -EFAULT;
goto out_put_ruleset;
}
/*
* Informs about useless rule: empty allowed_access (i.e. deny rules)
* are ignored in path walks.
*/
if (!path_beneath_attr.allowed_access) {
err = -ENOMSG;
goto out_put_ruleset;
}
/*
* Checks that allowed_access matches the @ruleset constraints
* (ruleset->fs_access_masks[0] is automatically upgraded to 64-bits).
*/
if ((path_beneath_attr.allowed_access | ruleset->fs_access_masks[0]) !=
ruleset->fs_access_masks[0]) {
err = -EINVAL;
goto out_put_ruleset;
}
/* Gets and checks the new rule. */
err = get_path_from_fd(path_beneath_attr.parent_fd, &path);
if (err)
goto out_put_ruleset;
/* Imports the new rule. */
err = landlock_append_fs_rule(ruleset, &path,
path_beneath_attr.allowed_access);
path_put(&path);
out_put_ruleset:
landlock_put_ruleset(ruleset);
return err;
}
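/*
 * Illustrative user space sketch (added, not part of the kernel): allow
 * execution beneath a directory referenced by an O_PATH descriptor.
 * The allowed_access must be a subset of the accesses handled by the
 * ruleset:
 *
 *	struct landlock_path_beneath_attr pb = {
 *		.allowed_access = LANDLOCK_ACCESS_FS_EXECUTE,
 *		.parent_fd = open("/usr", O_PATH | O_CLOEXEC),
 *	};
 *	syscall(__NR_landlock_add_rule, ruleset_fd,
 *		LANDLOCK_RULE_PATH_BENEATH, &pb, 0);
 *	close(pb.parent_fd);
 */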
/* Enforcement */
/**
* sys_landlock_restrict_self - Enforce a ruleset on the calling thread
*
* @ruleset_fd: File descriptor tied to the ruleset to merge with the target.
* @flags: Must be 0.
*
 * This system call enforces a Landlock ruleset on the current
 * thread. Enforcing a ruleset requires that the task has %CAP_SYS_ADMIN in its
* namespace or is running with no_new_privs. This avoids scenarios where
* unprivileged tasks can affect the behavior of privileged children.
*
* Possible returned errors are:
*
* - %EOPNOTSUPP: Landlock is supported by the kernel but disabled at boot time;
* - %EINVAL: @flags is not 0.
* - %EBADF: @ruleset_fd is not a file descriptor for the current thread;
* - %EBADFD: @ruleset_fd is not a ruleset file descriptor;
* - %EPERM: @ruleset_fd has no read access to the underlying ruleset, or the
* current thread is not running with no_new_privs, or it doesn't have
* %CAP_SYS_ADMIN in its namespace.
* - %E2BIG: The maximum number of stacked rulesets is reached for the current
* thread.
*/
SYSCALL_DEFINE2(landlock_restrict_self, const int, ruleset_fd, const __u32,
flags)
{
struct landlock_ruleset *new_dom, *ruleset;
struct cred *new_cred;
struct landlock_cred_security *new_llcred;
int err;
if (!landlock_initialized)
return -EOPNOTSUPP;
/*
* Similar checks as for seccomp(2), except that an -EPERM may be
* returned.
*/
if (!task_no_new_privs(current) &&
!ns_capable_noaudit(current_user_ns(), CAP_SYS_ADMIN))
return -EPERM;
/* No flag for now. */
if (flags)
return -EINVAL;
/* Gets and checks the ruleset. */
ruleset = get_ruleset_from_fd(ruleset_fd, FMODE_CAN_READ);
if (IS_ERR(ruleset))
return PTR_ERR(ruleset);
/* Prepares new credentials. */
new_cred = prepare_creds();
if (!new_cred) {
err = -ENOMEM;
goto out_put_ruleset;
}
new_llcred = landlock_cred(new_cred);
/*
* There is no possible race condition while copying and manipulating
* the current credentials because they are dedicated per thread.
*/
new_dom = landlock_merge_ruleset(new_llcred->domain, ruleset);
if (IS_ERR(new_dom)) {
err = PTR_ERR(new_dom);
goto out_put_creds;
}
/* Replaces the old (prepared) domain. */
landlock_put_ruleset(new_llcred->domain);
new_llcred->domain = new_dom;
landlock_put_ruleset(ruleset);
return commit_creds(new_cred);
out_put_creds:
abort_creds(new_cred);
out_put_ruleset:
landlock_put_ruleset(ruleset);
return err;
}
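/*
 * Illustrative user space sketch (added, not part of the kernel):
 * enforcing the ruleset on the current thread without CAP_SYS_ADMIN
 * first requires no_new_privs:
 *
 *	prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
 *	syscall(__NR_landlock_restrict_self, ruleset_fd, 0);
 *	close(ruleset_fd);
 */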
| linux-master | security/landlock/syscalls.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Landlock LSM - Filesystem management and hooks
*
* Copyright © 2016-2020 Mickaël Salaün <[email protected]>
* Copyright © 2018-2020 ANSSI
* Copyright © 2021-2022 Microsoft Corporation
*/
#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/bits.h>
#include <linux/compiler_types.h>
#include <linux/dcache.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/limits.h>
#include <linux/list.h>
#include <linux/lsm_hooks.h>
#include <linux/mount.h>
#include <linux/namei.h>
#include <linux/path.h>
#include <linux/rcupdate.h>
#include <linux/spinlock.h>
#include <linux/stat.h>
#include <linux/types.h>
#include <linux/wait_bit.h>
#include <linux/workqueue.h>
#include <uapi/linux/landlock.h>
#include "common.h"
#include "cred.h"
#include "fs.h"
#include "limits.h"
#include "object.h"
#include "ruleset.h"
#include "setup.h"
/* Underlying object management */
static void release_inode(struct landlock_object *const object)
__releases(object->lock)
{
struct inode *const inode = object->underobj;
struct super_block *sb;
if (!inode) {
spin_unlock(&object->lock);
return;
}
/*
* Protects against concurrent use by hook_sb_delete() of the reference
* to the underlying inode.
*/
object->underobj = NULL;
/*
* Makes sure that if the filesystem is concurrently unmounted,
* hook_sb_delete() will wait for us to finish iput().
*/
sb = inode->i_sb;
atomic_long_inc(&landlock_superblock(sb)->inode_refs);
spin_unlock(&object->lock);
/*
* Because object->underobj was not NULL, hook_sb_delete() and
* get_inode_object() guarantee that it is safe to reset
* landlock_inode(inode)->object while it is not NULL. It is therefore
* not necessary to lock inode->i_lock.
*/
rcu_assign_pointer(landlock_inode(inode)->object, NULL);
/*
* Now, new rules can safely be tied to @inode with get_inode_object().
*/
iput(inode);
if (atomic_long_dec_and_test(&landlock_superblock(sb)->inode_refs))
wake_up_var(&landlock_superblock(sb)->inode_refs);
}
static const struct landlock_object_underops landlock_fs_underops = {
.release = release_inode
};
/* Ruleset management */
static struct landlock_object *get_inode_object(struct inode *const inode)
{
struct landlock_object *object, *new_object;
struct landlock_inode_security *inode_sec = landlock_inode(inode);
rcu_read_lock();
retry:
object = rcu_dereference(inode_sec->object);
if (object) {
if (likely(refcount_inc_not_zero(&object->usage))) {
rcu_read_unlock();
return object;
}
/*
* We are racing with release_inode(), the object is going
* away. Wait for release_inode(), then retry.
*/
spin_lock(&object->lock);
spin_unlock(&object->lock);
goto retry;
}
rcu_read_unlock();
/*
* If there is no object tied to @inode, then create a new one (without
* holding any locks).
*/
new_object = landlock_create_object(&landlock_fs_underops, inode);
if (IS_ERR(new_object))
return new_object;
/*
* Protects against concurrent calls to get_inode_object() or
* hook_sb_delete().
*/
spin_lock(&inode->i_lock);
if (unlikely(rcu_access_pointer(inode_sec->object))) {
/* Someone else just created the object, bail out and retry. */
spin_unlock(&inode->i_lock);
kfree(new_object);
rcu_read_lock();
goto retry;
}
/*
* @inode will be released by hook_sb_delete() on its superblock
* shutdown, or by release_inode() when no more ruleset references the
* related object.
*/
ihold(inode);
rcu_assign_pointer(inode_sec->object, new_object);
spin_unlock(&inode->i_lock);
return new_object;
}
/* All access rights that can be tied to files. */
/* clang-format off */
#define ACCESS_FILE ( \
LANDLOCK_ACCESS_FS_EXECUTE | \
LANDLOCK_ACCESS_FS_WRITE_FILE | \
LANDLOCK_ACCESS_FS_READ_FILE | \
LANDLOCK_ACCESS_FS_TRUNCATE)
/* clang-format on */
/*
* All access rights that are denied by default whether they are handled or not
* by a ruleset/layer. This must be ORed with all ruleset->fs_access_masks[]
* entries when we need to get the absolute handled access masks.
*/
/* clang-format off */
#define ACCESS_INITIALLY_DENIED ( \
LANDLOCK_ACCESS_FS_REFER)
/* clang-format on */
/*
* @path: Should have been checked by get_path_from_fd().
*/
int landlock_append_fs_rule(struct landlock_ruleset *const ruleset,
const struct path *const path,
access_mask_t access_rights)
{
int err;
struct landlock_object *object;
/* Files only get access rights that make sense. */
if (!d_is_dir(path->dentry) &&
(access_rights | ACCESS_FILE) != ACCESS_FILE)
return -EINVAL;
if (WARN_ON_ONCE(ruleset->num_layers != 1))
return -EINVAL;
/* Transforms relative access rights to absolute ones. */
access_rights |=
LANDLOCK_MASK_ACCESS_FS &
~(ruleset->fs_access_masks[0] | ACCESS_INITIALLY_DENIED);
object = get_inode_object(d_backing_inode(path->dentry));
if (IS_ERR(object))
return PTR_ERR(object);
mutex_lock(&ruleset->lock);
err = landlock_insert_rule(ruleset, object, access_rights);
mutex_unlock(&ruleset->lock);
/*
* No need to check for an error because landlock_insert_rule()
* increments the refcount for the new object if needed.
*/
landlock_put_object(object);
return err;
}
/* Access-control management */
/*
* The lifetime of the returned rule is tied to @domain.
*
* Returns NULL if no rule is found or if @dentry is negative.
*/
static inline const struct landlock_rule *
find_rule(const struct landlock_ruleset *const domain,
const struct dentry *const dentry)
{
const struct landlock_rule *rule;
const struct inode *inode;
	/* Ignores nonexistent leaves. */
if (d_is_negative(dentry))
return NULL;
inode = d_backing_inode(dentry);
rcu_read_lock();
rule = landlock_find_rule(
domain, rcu_dereference(landlock_inode(inode)->object));
rcu_read_unlock();
return rule;
}
/*
* @layer_masks is read and may be updated according to the access request and
* the matching rule.
*
* Returns true if the request is allowed (i.e. relevant layer masks for the
* request are empty).
*/
static inline bool
unmask_layers(const struct landlock_rule *const rule,
const access_mask_t access_request,
layer_mask_t (*const layer_masks)[LANDLOCK_NUM_ACCESS_FS])
{
size_t layer_level;
if (!access_request || !layer_masks)
return true;
if (!rule)
return false;
/*
* An access is granted if, for each policy layer, at least one rule
* encountered on the pathwalk grants the requested access,
* regardless of its position in the layer stack. We must then check
* the remaining layers for each inode, from the first added layer to
	 * the last one. When there are multiple requested accesses, for each
* policy layer, the full set of requested accesses may not be granted
* by only one rule, but by the union (binary OR) of multiple rules.
* E.g. /a/b <execute> + /a <read> => /a/b <execute + read>
*/
for (layer_level = 0; layer_level < rule->num_layers; layer_level++) {
const struct landlock_layer *const layer =
&rule->layers[layer_level];
const layer_mask_t layer_bit = BIT_ULL(layer->level - 1);
const unsigned long access_req = access_request;
unsigned long access_bit;
bool is_empty;
/*
* Records in @layer_masks which layer grants access to each
* requested access.
*/
is_empty = true;
for_each_set_bit(access_bit, &access_req,
ARRAY_SIZE(*layer_masks)) {
if (layer->access & BIT_ULL(access_bit))
(*layer_masks)[access_bit] &= ~layer_bit;
is_empty = is_empty && !(*layer_masks)[access_bit];
}
if (is_empty)
return true;
}
return false;
}
/*
* Allows access to pseudo filesystems that will never be mountable (e.g.
 * sockfs, pipefs), but can still be reached through
 * /proc/<pid>/fd/<file-descriptor>
*/
static inline bool is_nouser_or_private(const struct dentry *dentry)
{
return (dentry->d_sb->s_flags & SB_NOUSER) ||
(d_is_positive(dentry) &&
unlikely(IS_PRIVATE(d_backing_inode(dentry))));
}
static inline access_mask_t
get_handled_accesses(const struct landlock_ruleset *const domain)
{
access_mask_t access_dom = ACCESS_INITIALLY_DENIED;
size_t layer_level;
for (layer_level = 0; layer_level < domain->num_layers; layer_level++)
access_dom |= domain->fs_access_masks[layer_level];
return access_dom & LANDLOCK_MASK_ACCESS_FS;
}
/**
* init_layer_masks - Initialize layer masks from an access request
*
* Populates @layer_masks such that for each access right in @access_request,
* the bits for all the layers are set where this access right is handled.
*
* @domain: The domain that defines the current restrictions.
* @access_request: The requested access rights to check.
* @layer_masks: The layer masks to populate.
*
* Returns: An access mask where each access right bit is set which is handled
* in any of the active layers in @domain.
*/
static inline access_mask_t
init_layer_masks(const struct landlock_ruleset *const domain,
const access_mask_t access_request,
layer_mask_t (*const layer_masks)[LANDLOCK_NUM_ACCESS_FS])
{
access_mask_t handled_accesses = 0;
size_t layer_level;
memset(layer_masks, 0, sizeof(*layer_masks));
/* An empty access request can happen because of O_WRONLY | O_RDWR. */
if (!access_request)
return 0;
/* Saves all handled accesses per layer. */
for (layer_level = 0; layer_level < domain->num_layers; layer_level++) {
const unsigned long access_req = access_request;
unsigned long access_bit;
for_each_set_bit(access_bit, &access_req,
ARRAY_SIZE(*layer_masks)) {
/*
* Artificially handles all initially denied by default
* access rights.
*/
if (BIT_ULL(access_bit) &
(domain->fs_access_masks[layer_level] |
ACCESS_INITIALLY_DENIED)) {
(*layer_masks)[access_bit] |=
BIT_ULL(layer_level);
handled_accesses |= BIT_ULL(access_bit);
}
}
}
return handled_accesses;
}
/*
* Check that a destination file hierarchy has more restrictions than a source
* file hierarchy. This is only used for link and rename actions.
*
* @layer_masks_child2: Optional child masks.
*/
static inline bool no_more_access(
const layer_mask_t (*const layer_masks_parent1)[LANDLOCK_NUM_ACCESS_FS],
const layer_mask_t (*const layer_masks_child1)[LANDLOCK_NUM_ACCESS_FS],
const bool child1_is_directory,
const layer_mask_t (*const layer_masks_parent2)[LANDLOCK_NUM_ACCESS_FS],
const layer_mask_t (*const layer_masks_child2)[LANDLOCK_NUM_ACCESS_FS],
const bool child2_is_directory)
{
unsigned long access_bit;
for (access_bit = 0; access_bit < ARRAY_SIZE(*layer_masks_parent2);
access_bit++) {
/* Ignores accesses that only make sense for directories. */
const bool is_file_access =
!!(BIT_ULL(access_bit) & ACCESS_FILE);
if (child1_is_directory || is_file_access) {
/*
* Checks if the destination restrictions are a
* superset of the source ones (i.e. inherited access
* rights without child exceptions):
* restrictions(parent2) >= restrictions(child1)
*/
if ((((*layer_masks_parent1)[access_bit] &
(*layer_masks_child1)[access_bit]) |
(*layer_masks_parent2)[access_bit]) !=
(*layer_masks_parent2)[access_bit])
return false;
}
if (!layer_masks_child2)
continue;
if (child2_is_directory || is_file_access) {
/*
* Checks inverted restrictions for RENAME_EXCHANGE:
* restrictions(parent1) >= restrictions(child2)
*/
if ((((*layer_masks_parent2)[access_bit] &
(*layer_masks_child2)[access_bit]) |
(*layer_masks_parent1)[access_bit]) !=
(*layer_masks_parent1)[access_bit])
return false;
}
}
return true;
}
/*
* Removes @layer_masks accesses that are not requested.
*
* Returns true if the request is allowed, false otherwise.
*/
static inline bool
scope_to_request(const access_mask_t access_request,
layer_mask_t (*const layer_masks)[LANDLOCK_NUM_ACCESS_FS])
{
const unsigned long access_req = access_request;
unsigned long access_bit;
if (WARN_ON_ONCE(!layer_masks))
return true;
for_each_clear_bit(access_bit, &access_req, ARRAY_SIZE(*layer_masks))
(*layer_masks)[access_bit] = 0;
return !memchr_inv(layer_masks, 0, sizeof(*layer_masks));
}
/*
* Returns true if there is at least one access right different than
* LANDLOCK_ACCESS_FS_REFER.
*/
static inline bool
is_eacces(const layer_mask_t (*const layer_masks)[LANDLOCK_NUM_ACCESS_FS],
const access_mask_t access_request)
{
unsigned long access_bit;
/* LANDLOCK_ACCESS_FS_REFER alone must return -EXDEV. */
const unsigned long access_check = access_request &
~LANDLOCK_ACCESS_FS_REFER;
if (!layer_masks)
return false;
for_each_set_bit(access_bit, &access_check, ARRAY_SIZE(*layer_masks)) {
if ((*layer_masks)[access_bit])
return true;
}
return false;
}
/**
* is_access_to_paths_allowed - Check accesses for requests with a common path
*
* @domain: Domain to check against.
* @path: File hierarchy to walk through.
* @access_request_parent1: Accesses to check, once @layer_masks_parent1 is
* equal to @layer_masks_parent2 (if any). This is tied to the unique
* requested path for most actions, or the source in case of a refer action
* (i.e. rename or link), or the source and destination in case of
* RENAME_EXCHANGE.
* @layer_masks_parent1: Pointer to a matrix of layer masks per access
* masks, identifying the layers that forbid a specific access. Bits from
* this matrix can be unset according to the @path walk. An empty matrix
* means that @domain allows all possible Landlock accesses (i.e. not only
* those identified by @access_request_parent1). This matrix can
* initially refer to domain layer masks and, when the accesses for the
* destination and source are the same, to requested layer masks.
* @dentry_child1: Dentry to the initial child of the parent1 path. This
* pointer must be NULL for non-refer actions (i.e. not link nor rename).
* @access_request_parent2: Similar to @access_request_parent1 but for a
* request involving a source and a destination. This refers to the
* destination, except in case of RENAME_EXCHANGE where it also refers to
* the source. Must be set to 0 when using a simple path request.
* @layer_masks_parent2: Similar to @layer_masks_parent1 but for a refer
* action. This must be NULL otherwise.
* @dentry_child2: Dentry to the initial child of the parent2 path. This
* pointer is only set for RENAME_EXCHANGE actions and must be NULL
* otherwise.
*
* This helper first checks that the destination has a superset of restrictions
* compared to the source (if any) for a common path. Because of
* RENAME_EXCHANGE actions, source and destinations may be swapped. It then
* checks that the collected accesses and the remaining ones are enough to
* allow the request.
*
* Returns:
* - true if the access request is granted;
* - false otherwise.
*/
static bool is_access_to_paths_allowed(
const struct landlock_ruleset *const domain,
const struct path *const path,
const access_mask_t access_request_parent1,
layer_mask_t (*const layer_masks_parent1)[LANDLOCK_NUM_ACCESS_FS],
const struct dentry *const dentry_child1,
const access_mask_t access_request_parent2,
layer_mask_t (*const layer_masks_parent2)[LANDLOCK_NUM_ACCESS_FS],
const struct dentry *const dentry_child2)
{
bool allowed_parent1 = false, allowed_parent2 = false, is_dom_check,
child1_is_directory = true, child2_is_directory = true;
struct path walker_path;
access_mask_t access_masked_parent1, access_masked_parent2;
layer_mask_t _layer_masks_child1[LANDLOCK_NUM_ACCESS_FS],
_layer_masks_child2[LANDLOCK_NUM_ACCESS_FS];
layer_mask_t(*layer_masks_child1)[LANDLOCK_NUM_ACCESS_FS] = NULL,
(*layer_masks_child2)[LANDLOCK_NUM_ACCESS_FS] = NULL;
if (!access_request_parent1 && !access_request_parent2)
return true;
if (WARN_ON_ONCE(!domain || !path))
return true;
if (is_nouser_or_private(path->dentry))
return true;
if (WARN_ON_ONCE(domain->num_layers < 1 || !layer_masks_parent1))
return false;
if (unlikely(layer_masks_parent2)) {
if (WARN_ON_ONCE(!dentry_child1))
return false;
/*
* For a double request, first check for potential privilege
* escalation by looking at domain handled accesses (which are
* a superset of the meaningful requested accesses).
*/
access_masked_parent1 = access_masked_parent2 =
get_handled_accesses(domain);
is_dom_check = true;
} else {
if (WARN_ON_ONCE(dentry_child1 || dentry_child2))
return false;
/* For a simple request, only check for requested accesses. */
access_masked_parent1 = access_request_parent1;
access_masked_parent2 = access_request_parent2;
is_dom_check = false;
}
if (unlikely(dentry_child1)) {
unmask_layers(find_rule(domain, dentry_child1),
init_layer_masks(domain, LANDLOCK_MASK_ACCESS_FS,
&_layer_masks_child1),
&_layer_masks_child1);
layer_masks_child1 = &_layer_masks_child1;
child1_is_directory = d_is_dir(dentry_child1);
}
if (unlikely(dentry_child2)) {
unmask_layers(find_rule(domain, dentry_child2),
init_layer_masks(domain, LANDLOCK_MASK_ACCESS_FS,
&_layer_masks_child2),
&_layer_masks_child2);
layer_masks_child2 = &_layer_masks_child2;
child2_is_directory = d_is_dir(dentry_child2);
}
walker_path = *path;
path_get(&walker_path);
/*
* We need to walk through all the hierarchy to not miss any relevant
* restriction.
*/
while (true) {
struct dentry *parent_dentry;
const struct landlock_rule *rule;
/*
* If at least all accesses allowed on the destination are
		 * already allowed on the source, respectively if there are at
		 * least as many restrictions on the destination as on the
		 * source, then we can safely refer files from the source to
		 * the destination without risking a privilege escalation.
		 * This also applies in the case of RENAME_EXCHANGE, which
		 * implies checks in both directions. This is crucial for
		 * standalone multilayered security policies. Furthermore,
		 * this helps prevent policy writers from shooting themselves
		 * in the foot.
*/
if (unlikely(is_dom_check &&
no_more_access(
layer_masks_parent1, layer_masks_child1,
child1_is_directory, layer_masks_parent2,
layer_masks_child2,
child2_is_directory))) {
allowed_parent1 = scope_to_request(
access_request_parent1, layer_masks_parent1);
allowed_parent2 = scope_to_request(
access_request_parent2, layer_masks_parent2);
/* Stops when all accesses are granted. */
if (allowed_parent1 && allowed_parent2)
break;
/*
* Now, downgrades the remaining checks from domain
* handled accesses to requested accesses.
*/
is_dom_check = false;
access_masked_parent1 = access_request_parent1;
access_masked_parent2 = access_request_parent2;
}
rule = find_rule(domain, walker_path.dentry);
allowed_parent1 = unmask_layers(rule, access_masked_parent1,
layer_masks_parent1);
allowed_parent2 = unmask_layers(rule, access_masked_parent2,
layer_masks_parent2);
/* Stops when a rule from each layer grants access. */
if (allowed_parent1 && allowed_parent2)
break;
jump_up:
if (walker_path.dentry == walker_path.mnt->mnt_root) {
if (follow_up(&walker_path)) {
/* Ignores hidden mount points. */
goto jump_up;
} else {
/*
* Stops at the real root. Denies access
* because not all layers have granted access.
*/
break;
}
}
if (unlikely(IS_ROOT(walker_path.dentry))) {
/*
* Stops at disconnected root directories. Only allows
* access to internal filesystems (e.g. nsfs, which is
* reachable through /proc/<pid>/ns/<namespace>).
*/
allowed_parent1 = allowed_parent2 =
!!(walker_path.mnt->mnt_flags & MNT_INTERNAL);
break;
}
parent_dentry = dget_parent(walker_path.dentry);
dput(walker_path.dentry);
walker_path.dentry = parent_dentry;
}
path_put(&walker_path);
return allowed_parent1 && allowed_parent2;
}
static inline int check_access_path(const struct landlock_ruleset *const domain,
const struct path *const path,
access_mask_t access_request)
{
layer_mask_t layer_masks[LANDLOCK_NUM_ACCESS_FS] = {};
access_request = init_layer_masks(domain, access_request, &layer_masks);
if (is_access_to_paths_allowed(domain, path, access_request,
&layer_masks, NULL, 0, NULL, NULL))
return 0;
return -EACCES;
}
static inline int current_check_access_path(const struct path *const path,
const access_mask_t access_request)
{
const struct landlock_ruleset *const dom =
landlock_get_current_domain();
if (!dom)
return 0;
return check_access_path(dom, path, access_request);
}
static inline access_mask_t get_mode_access(const umode_t mode)
{
switch (mode & S_IFMT) {
case S_IFLNK:
return LANDLOCK_ACCESS_FS_MAKE_SYM;
case 0:
/* A zero mode translates to S_IFREG. */
case S_IFREG:
return LANDLOCK_ACCESS_FS_MAKE_REG;
case S_IFDIR:
return LANDLOCK_ACCESS_FS_MAKE_DIR;
case S_IFCHR:
return LANDLOCK_ACCESS_FS_MAKE_CHAR;
case S_IFBLK:
return LANDLOCK_ACCESS_FS_MAKE_BLOCK;
case S_IFIFO:
return LANDLOCK_ACCESS_FS_MAKE_FIFO;
case S_IFSOCK:
return LANDLOCK_ACCESS_FS_MAKE_SOCK;
default:
WARN_ON_ONCE(1);
return 0;
}
}
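/*
 * Illustrative sketch (not part of the enforcement logic above): from user
 * space, the file type requested at creation time selects the access right
 * checked on the parent directory, e.g. by hook_path_mknod(). The mode bits
 * and device number below are arbitrary.
 *
 *	mknod("fifo", S_IFIFO | 0644, 0);	// LANDLOCK_ACCESS_FS_MAKE_FIFO
 *	mknod("node", S_IFCHR | 0600, dev);	// LANDLOCK_ACCESS_FS_MAKE_CHAR
 *	open("file", O_CREAT | O_WRONLY, 0644);	// LANDLOCK_ACCESS_FS_MAKE_REG
 *
 * Each right must be granted on the parent directory by at least one rule in
 * every layer of the caller's domain, otherwise the call fails with EACCES.
 */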
static inline access_mask_t maybe_remove(const struct dentry *const dentry)
{
if (d_is_negative(dentry))
return 0;
return d_is_dir(dentry) ? LANDLOCK_ACCESS_FS_REMOVE_DIR :
LANDLOCK_ACCESS_FS_REMOVE_FILE;
}
/**
* collect_domain_accesses - Walk through a file path and collect accesses
*
* @domain: Domain to check against.
* @mnt_root: Last directory to check.
* @dir: Directory to start the walk from.
* @layer_masks_dom: Where to store the collected accesses.
*
* This helper is useful to begin a path walk from the @dir directory to a
* @mnt_root directory used as a mount point. This mount point is the common
 * ancestor between the source and the destination of a renamed or linked
* file. While walking from @dir to @mnt_root, we record all the domain's
* allowed accesses in @layer_masks_dom.
*
* This is similar to is_access_to_paths_allowed() but much simpler because it
* only handles walking on the same mount point and only checks one set of
* accesses.
*
* Returns:
* - true if all the domain access rights are allowed for @dir;
* - false if the walk reached @mnt_root.
*/
static bool collect_domain_accesses(
const struct landlock_ruleset *const domain,
const struct dentry *const mnt_root, struct dentry *dir,
layer_mask_t (*const layer_masks_dom)[LANDLOCK_NUM_ACCESS_FS])
{
unsigned long access_dom;
bool ret = false;
if (WARN_ON_ONCE(!domain || !mnt_root || !dir || !layer_masks_dom))
return true;
if (is_nouser_or_private(dir))
return true;
access_dom = init_layer_masks(domain, LANDLOCK_MASK_ACCESS_FS,
layer_masks_dom);
dget(dir);
while (true) {
struct dentry *parent_dentry;
/* Gets all layers allowing all domain accesses. */
if (unmask_layers(find_rule(domain, dir), access_dom,
layer_masks_dom)) {
/*
* Stops when all handled accesses are allowed by at
* least one rule in each layer.
*/
ret = true;
break;
}
/* We should not reach a root other than @mnt_root. */
if (dir == mnt_root || WARN_ON_ONCE(IS_ROOT(dir)))
break;
parent_dentry = dget_parent(dir);
dput(dir);
dir = parent_dentry;
}
dput(dir);
return ret;
}
/**
* current_check_refer_path - Check if a rename or link action is allowed
*
* @old_dentry: File or directory requested to be moved or linked.
* @new_dir: Destination parent directory.
* @new_dentry: Destination file or directory.
 * @removable: Set to true if this is a rename operation.
 * @exchange: Set to true if this is a rename operation with RENAME_EXCHANGE.
*
* Because of its unprivileged constraints, Landlock relies on file hierarchies
* (and not only inodes) to tie access rights to files. Being able to link or
* rename a file hierarchy brings some challenges. Indeed, moving or linking a
* file (i.e. creating a new reference to an inode) can have an impact on the
* actions allowed for a set of files if it would change its parent directory
* (i.e. reparenting).
*
* To avoid trivial access right bypasses, Landlock first checks if the file or
* directory requested to be moved would gain new access rights inherited from
* its new hierarchy. Before returning any error, Landlock then checks that
* the parent source hierarchy and the destination hierarchy would allow the
* link or rename action. If it is not the case, an error with EACCES is
* returned to inform user space that there is no way to remove or create the
* requested source file type. If it should be allowed but the new inherited
* access rights would be greater than the source access rights, then the
* kernel returns an error with EXDEV. Prioritizing EACCES over EXDEV enables
* user space to abort the whole operation if there is no way to do it, or to
* manually copy the source to the destination if this remains allowed, e.g.
* because file creation is allowed on the destination directory but not direct
* linking.
*
* To achieve this goal, the kernel needs to compare two file hierarchies: the
* one identifying the source file or directory (including itself), and the
* destination one. This can be seen as a multilayer partial ordering problem.
* The kernel walks through these paths and collects in a matrix the access
* rights that are denied per layer. These matrices are then compared to see
* if the destination one has more (or the same) restrictions as the source
* one. If this is the case, the requested action will not return EXDEV, which
* doesn't mean the action is allowed. The parent hierarchy of the source
* (i.e. parent directory), and the destination hierarchy must also be checked
* to verify that they explicitly allow such action (i.e. referencing,
* creation and potentially removal rights). The kernel implementation is then
* required to rely on potentially four matrices of access rights: one for the
* source file or directory (i.e. the child), a potentially other one for the
* other source/destination (in case of RENAME_EXCHANGE), one for the source
* parent hierarchy and a last one for the destination hierarchy. These
* ephemeral matrices take some space on the stack, which limits the number of
* layers to a deemed reasonable number: 16.
*
* Returns:
* - 0 if access is allowed;
* - -EXDEV if @old_dentry would inherit new access rights from @new_dir;
* - -EACCES if file removal or creation is denied.
*/
static int current_check_refer_path(struct dentry *const old_dentry,
const struct path *const new_dir,
struct dentry *const new_dentry,
const bool removable, const bool exchange)
{
const struct landlock_ruleset *const dom =
landlock_get_current_domain();
bool allow_parent1, allow_parent2;
access_mask_t access_request_parent1, access_request_parent2;
struct path mnt_dir;
layer_mask_t layer_masks_parent1[LANDLOCK_NUM_ACCESS_FS],
layer_masks_parent2[LANDLOCK_NUM_ACCESS_FS];
if (!dom)
return 0;
if (WARN_ON_ONCE(dom->num_layers < 1))
return -EACCES;
if (unlikely(d_is_negative(old_dentry)))
return -ENOENT;
if (exchange) {
if (unlikely(d_is_negative(new_dentry)))
return -ENOENT;
access_request_parent1 =
get_mode_access(d_backing_inode(new_dentry)->i_mode);
} else {
access_request_parent1 = 0;
}
access_request_parent2 =
get_mode_access(d_backing_inode(old_dentry)->i_mode);
if (removable) {
access_request_parent1 |= maybe_remove(old_dentry);
access_request_parent2 |= maybe_remove(new_dentry);
}
/* The mount points are the same for old and new paths, cf. EXDEV. */
if (old_dentry->d_parent == new_dir->dentry) {
/*
* The LANDLOCK_ACCESS_FS_REFER access right is not required
* for same-directory referer (i.e. no reparenting).
*/
access_request_parent1 = init_layer_masks(
dom, access_request_parent1 | access_request_parent2,
&layer_masks_parent1);
if (is_access_to_paths_allowed(
dom, new_dir, access_request_parent1,
&layer_masks_parent1, NULL, 0, NULL, NULL))
return 0;
return -EACCES;
}
access_request_parent1 |= LANDLOCK_ACCESS_FS_REFER;
access_request_parent2 |= LANDLOCK_ACCESS_FS_REFER;
/* Saves the common mount point. */
mnt_dir.mnt = new_dir->mnt;
mnt_dir.dentry = new_dir->mnt->mnt_root;
/* new_dir->dentry is equal to new_dentry->d_parent */
allow_parent1 = collect_domain_accesses(dom, mnt_dir.dentry,
old_dentry->d_parent,
&layer_masks_parent1);
allow_parent2 = collect_domain_accesses(
dom, mnt_dir.dentry, new_dir->dentry, &layer_masks_parent2);
if (allow_parent1 && allow_parent2)
return 0;
/*
* To be able to compare source and destination domain access rights,
* take into account the @old_dentry access rights aggregated with its
* parent access rights. This will be useful to compare with the
* destination parent access rights.
*/
if (is_access_to_paths_allowed(
dom, &mnt_dir, access_request_parent1, &layer_masks_parent1,
old_dentry, access_request_parent2, &layer_masks_parent2,
exchange ? new_dentry : NULL))
return 0;
/*
* This prioritizes EACCES over EXDEV for all actions, including
* renames with RENAME_EXCHANGE.
*/
if (likely(is_eacces(&layer_masks_parent1, access_request_parent1) ||
is_eacces(&layer_masks_parent2, access_request_parent2)))
return -EACCES;
/*
* Gracefully forbids reparenting if the destination directory
* hierarchy is not a superset of restrictions of the source directory
* hierarchy, or if LANDLOCK_ACCESS_FS_REFER is not allowed by the
* source or the destination.
*/
return -EXDEV;
}
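/*
 * Illustrative user-space sketch (not kernel code): because EACCES is
 * prioritized over EXDEV, a sandboxed program can distinguish "never
 * possible" from "not possible in place" and fall back to a copy. Error
 * handling is elided, the paths are arbitrary, and copy_file() is a
 * hypothetical helper, not a libc function.
 *
 *	if (rename("src/log", "dst/log") < 0) {
 *		if (errno == EXDEV) {
 *			// Reparenting denied: copy then unlink, if the
 *			// domain allows file creation and removal.
 *			copy_file("src/log", "dst/log");
 *			unlink("src/log");
 *		}
 *		// EACCES: no way to perform the operation at all.
 *	}
 */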
/* Inode hooks */
static void hook_inode_free_security(struct inode *const inode)
{
/*
* All inodes must already have been untied from their object by
* release_inode() or hook_sb_delete().
*/
WARN_ON_ONCE(landlock_inode(inode)->object);
}
/* Super-block hooks */
/*
* Release the inodes used in a security policy.
*
* Cf. fsnotify_unmount_inodes() and invalidate_inodes()
*/
static void hook_sb_delete(struct super_block *const sb)
{
struct inode *inode, *prev_inode = NULL;
if (!landlock_initialized)
return;
spin_lock(&sb->s_inode_list_lock);
list_for_each_entry(inode, &sb->s_inodes, i_sb_list) {
struct landlock_object *object;
/* Only handles referenced inodes. */
if (!atomic_read(&inode->i_count))
continue;
/*
* Protects against concurrent modification of inode (e.g.
* from get_inode_object()).
*/
spin_lock(&inode->i_lock);
/*
* Checks I_FREEING and I_WILL_FREE to protect against a race
* condition when release_inode() just called iput(), which
* could lead to a NULL dereference of inode->security or a
* second call to iput() for the same Landlock object. Also
		 * checks I_NEW because such an inode cannot be tied to an object.
*/
if (inode->i_state & (I_FREEING | I_WILL_FREE | I_NEW)) {
spin_unlock(&inode->i_lock);
continue;
}
rcu_read_lock();
object = rcu_dereference(landlock_inode(inode)->object);
if (!object) {
rcu_read_unlock();
spin_unlock(&inode->i_lock);
continue;
}
/* Keeps a reference to this inode until the next loop walk. */
__iget(inode);
spin_unlock(&inode->i_lock);
/*
* If there is no concurrent release_inode() ongoing, then we
* are in charge of calling iput() on this inode, otherwise we
* will just wait for it to finish.
*/
spin_lock(&object->lock);
if (object->underobj == inode) {
object->underobj = NULL;
spin_unlock(&object->lock);
rcu_read_unlock();
/*
* Because object->underobj was not NULL,
* release_inode() and get_inode_object() guarantee
* that it is safe to reset
* landlock_inode(inode)->object while it is not NULL.
* It is therefore not necessary to lock inode->i_lock.
*/
rcu_assign_pointer(landlock_inode(inode)->object, NULL);
/*
* At this point, we own the ihold() reference that was
* originally set up by get_inode_object() and the
* __iget() reference that we just set in this loop
* walk. Therefore the following call to iput() will
			 * not sleep nor drop the inode because there are now at
* least two references to it.
*/
iput(inode);
} else {
spin_unlock(&object->lock);
rcu_read_unlock();
}
if (prev_inode) {
/*
* At this point, we still own the __iget() reference
* that we just set in this loop walk. Therefore we
* can drop the list lock and know that the inode won't
* disappear from under us until the next loop walk.
*/
spin_unlock(&sb->s_inode_list_lock);
/*
* We can now actually put the inode reference from the
* previous loop walk, which is not needed anymore.
*/
iput(prev_inode);
cond_resched();
spin_lock(&sb->s_inode_list_lock);
}
prev_inode = inode;
}
spin_unlock(&sb->s_inode_list_lock);
/* Puts the inode reference from the last loop walk, if any. */
if (prev_inode)
iput(prev_inode);
/* Waits for pending iput() in release_inode(). */
wait_var_event(&landlock_superblock(sb)->inode_refs,
!atomic_long_read(&landlock_superblock(sb)->inode_refs));
}
/*
* Because a Landlock security policy is defined according to the filesystem
* topology (i.e. the mount namespace), changing it may grant access to files
* not previously allowed.
*
* To make it simple, deny any filesystem topology modification by landlocked
* processes. Non-landlocked processes may still change the namespace of a
* landlocked process, but this kind of threat must be handled by a system-wide
* access-control security policy.
*
* This could be lifted in the future if Landlock can safely handle mount
* namespace updates requested by a landlocked process. Indeed, we could
* update the current domain (which is currently read-only) by taking into
* account the accesses of the source and the destination of a new mount point.
* However, it would also require to make all the child domains dynamically
* inherit these new constraints. Anyway, for backward compatibility reasons,
* a dedicated user space option would be required (e.g. as a ruleset flag).
*/
static int hook_sb_mount(const char *const dev_name,
const struct path *const path, const char *const type,
const unsigned long flags, void *const data)
{
if (!landlock_get_current_domain())
return 0;
return -EPERM;
}
static int hook_move_mount(const struct path *const from_path,
const struct path *const to_path)
{
if (!landlock_get_current_domain())
return 0;
return -EPERM;
}
/*
* Removing a mount point may reveal a previously hidden file hierarchy, which
* may then grant access to files, which may have previously been forbidden.
*/
static int hook_sb_umount(struct vfsmount *const mnt, const int flags)
{
if (!landlock_get_current_domain())
return 0;
return -EPERM;
}
static int hook_sb_remount(struct super_block *const sb, void *const mnt_opts)
{
if (!landlock_get_current_domain())
return 0;
return -EPERM;
}
/*
* pivot_root(2), like mount(2), changes the current mount namespace. It must
* then be forbidden for a landlocked process.
*
* However, chroot(2) may be allowed because it only changes the relative root
* directory of the current process. Moreover, it can be used to restrict the
* view of the filesystem.
*/
static int hook_sb_pivotroot(const struct path *const old_path,
const struct path *const new_path)
{
if (!landlock_get_current_domain())
return 0;
return -EPERM;
}
/* Path hooks */
static int hook_path_link(struct dentry *const old_dentry,
const struct path *const new_dir,
struct dentry *const new_dentry)
{
return current_check_refer_path(old_dentry, new_dir, new_dentry, false,
false);
}
static int hook_path_rename(const struct path *const old_dir,
struct dentry *const old_dentry,
const struct path *const new_dir,
struct dentry *const new_dentry,
const unsigned int flags)
{
/* old_dir refers to old_dentry->d_parent and new_dir->mnt */
return current_check_refer_path(old_dentry, new_dir, new_dentry, true,
!!(flags & RENAME_EXCHANGE));
}
static int hook_path_mkdir(const struct path *const dir,
struct dentry *const dentry, const umode_t mode)
{
return current_check_access_path(dir, LANDLOCK_ACCESS_FS_MAKE_DIR);
}
static int hook_path_mknod(const struct path *const dir,
struct dentry *const dentry, const umode_t mode,
const unsigned int dev)
{
const struct landlock_ruleset *const dom =
landlock_get_current_domain();
if (!dom)
return 0;
return check_access_path(dom, dir, get_mode_access(mode));
}
static int hook_path_symlink(const struct path *const dir,
struct dentry *const dentry,
const char *const old_name)
{
return current_check_access_path(dir, LANDLOCK_ACCESS_FS_MAKE_SYM);
}
static int hook_path_unlink(const struct path *const dir,
struct dentry *const dentry)
{
return current_check_access_path(dir, LANDLOCK_ACCESS_FS_REMOVE_FILE);
}
static int hook_path_rmdir(const struct path *const dir,
struct dentry *const dentry)
{
return current_check_access_path(dir, LANDLOCK_ACCESS_FS_REMOVE_DIR);
}
static int hook_path_truncate(const struct path *const path)
{
return current_check_access_path(path, LANDLOCK_ACCESS_FS_TRUNCATE);
}
/* File hooks */
/**
* get_required_file_open_access - Get access needed to open a file
*
* @file: File being opened.
*
* Returns the access rights that are required for opening the given file,
* depending on the file type and open mode.
*/
static inline access_mask_t
get_required_file_open_access(const struct file *const file)
{
access_mask_t access = 0;
if (file->f_mode & FMODE_READ) {
/* A directory can only be opened in read mode. */
if (S_ISDIR(file_inode(file)->i_mode))
return LANDLOCK_ACCESS_FS_READ_DIR;
access = LANDLOCK_ACCESS_FS_READ_FILE;
}
if (file->f_mode & FMODE_WRITE)
access |= LANDLOCK_ACCESS_FS_WRITE_FILE;
/* __FMODE_EXEC is indeed part of f_flags, not f_mode. */
if (file->f_flags & __FMODE_EXEC)
access |= LANDLOCK_ACCESS_FS_EXECUTE;
return access;
}
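/*
 * Illustrative mapping derived from the helper above for a few common
 * open(2) calls (a sketch; only the relevant flags are shown):
 *
 *	open(file, O_RDONLY)	-> LANDLOCK_ACCESS_FS_READ_FILE
 *	open(file, O_RDWR)	-> LANDLOCK_ACCESS_FS_READ_FILE |
 *				   LANDLOCK_ACCESS_FS_WRITE_FILE
 *	open(dir, O_RDONLY)	-> LANDLOCK_ACCESS_FS_READ_DIR
 *	execve() implied open	-> LANDLOCK_ACCESS_FS_EXECUTE (via __FMODE_EXEC)
 *	open(file, O_PATH)	-> 0 (cf. the comment in hook_file_open())
 */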
static int hook_file_alloc_security(struct file *const file)
{
/*
* Grants all access rights, even if most of them are not checked later
* on. It is more consistent.
*
* Notably, file descriptors for regular files can also be acquired
* without going through the file_open hook, for example when using
* memfd_create(2).
*/
landlock_file(file)->allowed_access = LANDLOCK_MASK_ACCESS_FS;
return 0;
}
static int hook_file_open(struct file *const file)
{
layer_mask_t layer_masks[LANDLOCK_NUM_ACCESS_FS] = {};
access_mask_t open_access_request, full_access_request, allowed_access;
const access_mask_t optional_access = LANDLOCK_ACCESS_FS_TRUNCATE;
const struct landlock_ruleset *const dom =
landlock_get_current_domain();
if (!dom)
return 0;
/*
* Because a file may be opened with O_PATH, get_required_file_open_access()
* may return 0. This case will be handled with a future Landlock
* evolution.
*/
open_access_request = get_required_file_open_access(file);
/*
* We look up more access than what we immediately need for open(), so
* that we can later authorize operations on opened files.
*/
full_access_request = open_access_request | optional_access;
if (is_access_to_paths_allowed(
dom, &file->f_path,
init_layer_masks(dom, full_access_request, &layer_masks),
&layer_masks, NULL, 0, NULL, NULL)) {
allowed_access = full_access_request;
} else {
unsigned long access_bit;
const unsigned long access_req = full_access_request;
/*
* Calculate the actual allowed access rights from layer_masks.
* Add each access right to allowed_access which has not been
* vetoed by any layer.
*/
allowed_access = 0;
for_each_set_bit(access_bit, &access_req,
ARRAY_SIZE(layer_masks)) {
if (!layer_masks[access_bit])
allowed_access |= BIT_ULL(access_bit);
}
}
/*
* For operations on already opened files (i.e. ftruncate()), it is the
* access rights at the time of open() which decide whether the
* operation is permitted. Therefore, we record the relevant subset of
* file access rights in the opened struct file.
*/
landlock_file(file)->allowed_access = allowed_access;
if ((open_access_request & allowed_access) == open_access_request)
return 0;
return -EACCES;
}
static int hook_file_truncate(struct file *const file)
{
/*
* Allows truncation if the truncate right was available at the time of
* opening the file, to get a consistent access check as for read, write
* and execute operations.
*
* Note: For checks done based on the file's Landlock allowed access, we
* enforce them independently of whether the current thread is in a
* Landlock domain, so that open files passed between independent
* processes retain their behaviour.
*/
if (landlock_file(file)->allowed_access & LANDLOCK_ACCESS_FS_TRUNCATE)
return 0;
return -EACCES;
}
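/*
 * Illustrative consequence of the two truncation hooks (a sketch, not
 * additional enforcement): truncate("file", 0) is checked against the
 * current domain through hook_path_truncate(), while ftruncate(fd, 0) only
 * depends on the access rights recorded by hook_file_open() when @fd was
 * opened. A file descriptor opened before landlock_restrict_self(2)
 * therefore keeps its truncation right after the domain is enforced, even
 * when passed to another process.
 */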
static struct security_hook_list landlock_hooks[] __ro_after_init = {
LSM_HOOK_INIT(inode_free_security, hook_inode_free_security),
LSM_HOOK_INIT(sb_delete, hook_sb_delete),
LSM_HOOK_INIT(sb_mount, hook_sb_mount),
LSM_HOOK_INIT(move_mount, hook_move_mount),
LSM_HOOK_INIT(sb_umount, hook_sb_umount),
LSM_HOOK_INIT(sb_remount, hook_sb_remount),
LSM_HOOK_INIT(sb_pivotroot, hook_sb_pivotroot),
LSM_HOOK_INIT(path_link, hook_path_link),
LSM_HOOK_INIT(path_rename, hook_path_rename),
LSM_HOOK_INIT(path_mkdir, hook_path_mkdir),
LSM_HOOK_INIT(path_mknod, hook_path_mknod),
LSM_HOOK_INIT(path_symlink, hook_path_symlink),
LSM_HOOK_INIT(path_unlink, hook_path_unlink),
LSM_HOOK_INIT(path_rmdir, hook_path_rmdir),
LSM_HOOK_INIT(path_truncate, hook_path_truncate),
LSM_HOOK_INIT(file_alloc_security, hook_file_alloc_security),
LSM_HOOK_INIT(file_open, hook_file_open),
LSM_HOOK_INIT(file_truncate, hook_file_truncate),
};
__init void landlock_add_fs_hooks(void)
{
security_add_hooks(landlock_hooks, ARRAY_SIZE(landlock_hooks),
LANDLOCK_NAME);
}
| linux-master | security/landlock/fs.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Landlock LSM - Security framework setup
*
* Copyright © 2016-2020 Mickaël Salaün <[email protected]>
* Copyright © 2018-2020 ANSSI
*/
#include <linux/init.h>
#include <linux/lsm_hooks.h>
#include "common.h"
#include "cred.h"
#include "fs.h"
#include "ptrace.h"
#include "setup.h"
bool landlock_initialized __ro_after_init = false;
struct lsm_blob_sizes landlock_blob_sizes __ro_after_init = {
.lbs_cred = sizeof(struct landlock_cred_security),
.lbs_file = sizeof(struct landlock_file_security),
.lbs_inode = sizeof(struct landlock_inode_security),
.lbs_superblock = sizeof(struct landlock_superblock_security),
};
static int __init landlock_init(void)
{
landlock_add_cred_hooks();
landlock_add_ptrace_hooks();
landlock_add_fs_hooks();
landlock_initialized = true;
pr_info("Up and running.\n");
return 0;
}
DEFINE_LSM(LANDLOCK_NAME) = {
.name = LANDLOCK_NAME,
.init = landlock_init,
.blobs = &landlock_blob_sizes,
};
| linux-master | security/landlock/setup.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Landlock LSM - Ruleset management
*
* Copyright © 2016-2020 Mickaël Salaün <[email protected]>
* Copyright © 2018-2020 ANSSI
*/
#include <linux/bits.h>
#include <linux/bug.h>
#include <linux/compiler_types.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/lockdep.h>
#include <linux/overflow.h>
#include <linux/rbtree.h>
#include <linux/refcount.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
#include "limits.h"
#include "object.h"
#include "ruleset.h"
static struct landlock_ruleset *create_ruleset(const u32 num_layers)
{
struct landlock_ruleset *new_ruleset;
new_ruleset =
kzalloc(struct_size(new_ruleset, fs_access_masks, num_layers),
GFP_KERNEL_ACCOUNT);
if (!new_ruleset)
return ERR_PTR(-ENOMEM);
refcount_set(&new_ruleset->usage, 1);
mutex_init(&new_ruleset->lock);
new_ruleset->root = RB_ROOT;
new_ruleset->num_layers = num_layers;
/*
* hierarchy = NULL
* num_rules = 0
* fs_access_masks[] = 0
*/
return new_ruleset;
}
struct landlock_ruleset *
landlock_create_ruleset(const access_mask_t fs_access_mask)
{
struct landlock_ruleset *new_ruleset;
/* Informs about useless ruleset. */
if (!fs_access_mask)
return ERR_PTR(-ENOMSG);
new_ruleset = create_ruleset(1);
if (!IS_ERR(new_ruleset))
new_ruleset->fs_access_masks[0] = fs_access_mask;
return new_ruleset;
}
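/*
 * Illustrative user-space counterpart (a hedged sketch, not kernel code):
 * landlock_create_ruleset(2) ends up in the helper above, with
 * @fs_access_mask taken from the ruleset attribute. Error handling is
 * elided.
 *
 *	struct landlock_ruleset_attr attr = {
 *		.handled_access_fs = LANDLOCK_ACCESS_FS_WRITE_FILE |
 *				     LANDLOCK_ACCESS_FS_MAKE_REG,
 *	};
 *	int ruleset_fd = syscall(__NR_landlock_create_ruleset,
 *				 &attr, sizeof(attr), 0);
 *
 * An attribute with handled_access_fs == 0 makes this function return
 * -ENOMSG, which is forwarded to user space.
 */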
static void build_check_rule(void)
{
const struct landlock_rule rule = {
.num_layers = ~0,
};
BUILD_BUG_ON(rule.num_layers < LANDLOCK_MAX_NUM_LAYERS);
}
static struct landlock_rule *
create_rule(struct landlock_object *const object,
const struct landlock_layer (*const layers)[], const u32 num_layers,
const struct landlock_layer *const new_layer)
{
struct landlock_rule *new_rule;
u32 new_num_layers;
build_check_rule();
if (new_layer) {
/* Should already be checked by landlock_merge_ruleset(). */
if (WARN_ON_ONCE(num_layers >= LANDLOCK_MAX_NUM_LAYERS))
return ERR_PTR(-E2BIG);
new_num_layers = num_layers + 1;
} else {
new_num_layers = num_layers;
}
new_rule = kzalloc(struct_size(new_rule, layers, new_num_layers),
GFP_KERNEL_ACCOUNT);
if (!new_rule)
return ERR_PTR(-ENOMEM);
RB_CLEAR_NODE(&new_rule->node);
landlock_get_object(object);
new_rule->object = object;
new_rule->num_layers = new_num_layers;
/* Copies the original layer stack. */
memcpy(new_rule->layers, layers,
flex_array_size(new_rule, layers, num_layers));
if (new_layer)
/* Adds a copy of @new_layer on the layer stack. */
new_rule->layers[new_rule->num_layers - 1] = *new_layer;
return new_rule;
}
static void free_rule(struct landlock_rule *const rule)
{
might_sleep();
if (!rule)
return;
landlock_put_object(rule->object);
kfree(rule);
}
static void build_check_ruleset(void)
{
const struct landlock_ruleset ruleset = {
.num_rules = ~0,
.num_layers = ~0,
};
typeof(ruleset.fs_access_masks[0]) fs_access_mask = ~0;
BUILD_BUG_ON(ruleset.num_rules < LANDLOCK_MAX_NUM_RULES);
BUILD_BUG_ON(ruleset.num_layers < LANDLOCK_MAX_NUM_LAYERS);
BUILD_BUG_ON(fs_access_mask < LANDLOCK_MASK_ACCESS_FS);
}
/**
* insert_rule - Create and insert a rule in a ruleset
*
* @ruleset: The ruleset to be updated.
* @object: The object to build the new rule with. The underlying kernel
* object must be held by the caller.
* @layers: One or multiple layers to be copied into the new rule.
* @num_layers: The number of @layers entries.
*
* When user space requests to add a new rule to a ruleset, @layers only
* contains one entry and this entry is not assigned to any level. In this
* case, the new rule will extend @ruleset, similarly to a boolean OR between
* access rights.
*
* When merging a ruleset in a domain, or copying a domain, @layers will be
* added to @ruleset as new constraints, similarly to a boolean AND between
* access rights.
*/
static int insert_rule(struct landlock_ruleset *const ruleset,
struct landlock_object *const object,
const struct landlock_layer (*const layers)[],
size_t num_layers)
{
struct rb_node **walker_node;
struct rb_node *parent_node = NULL;
struct landlock_rule *new_rule;
might_sleep();
lockdep_assert_held(&ruleset->lock);
if (WARN_ON_ONCE(!object || !layers))
return -ENOENT;
walker_node = &(ruleset->root.rb_node);
while (*walker_node) {
struct landlock_rule *const this =
rb_entry(*walker_node, struct landlock_rule, node);
if (this->object != object) {
parent_node = *walker_node;
if (this->object < object)
walker_node = &((*walker_node)->rb_right);
else
walker_node = &((*walker_node)->rb_left);
continue;
}
/* Only a single-level layer should match an existing rule. */
if (WARN_ON_ONCE(num_layers != 1))
return -EINVAL;
/* If there is a matching rule, updates it. */
if ((*layers)[0].level == 0) {
/*
* Extends access rights when the request comes from
* landlock_add_rule(2), i.e. @ruleset is not a domain.
*/
if (WARN_ON_ONCE(this->num_layers != 1))
return -EINVAL;
if (WARN_ON_ONCE(this->layers[0].level != 0))
return -EINVAL;
this->layers[0].access |= (*layers)[0].access;
return 0;
}
if (WARN_ON_ONCE(this->layers[0].level == 0))
return -EINVAL;
/*
* Intersects access rights when it is a merge between a
* ruleset and a domain.
*/
new_rule = create_rule(object, &this->layers, this->num_layers,
&(*layers)[0]);
if (IS_ERR(new_rule))
return PTR_ERR(new_rule);
rb_replace_node(&this->node, &new_rule->node, &ruleset->root);
free_rule(this);
return 0;
}
/* There is no match for @object. */
build_check_ruleset();
if (ruleset->num_rules >= LANDLOCK_MAX_NUM_RULES)
return -E2BIG;
new_rule = create_rule(object, layers, num_layers, NULL);
if (IS_ERR(new_rule))
return PTR_ERR(new_rule);
rb_link_node(&new_rule->node, parent_node, walker_node);
rb_insert_color(&new_rule->node, &ruleset->root);
ruleset->num_rules++;
return 0;
}
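/*
 * Illustrative example of the two behaviours documented above (a sketch):
 *
 * 1) Building a ruleset (level == 0): two landlock_add_rule(2) calls on the
 *    same object union their accesses, e.g. READ_FILE then WRITE_FILE on the
 *    same directory results in one rule granting READ_FILE | WRITE_FILE.
 *
 * 2) Merging into a domain (level != 0): the same object present in several
 *    layers keeps one layer entry per level, and an access is ultimately
 *    allowed only if every layer grants it (a boolean AND across layers).
 */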
static void build_check_layer(void)
{
const struct landlock_layer layer = {
.level = ~0,
.access = ~0,
};
BUILD_BUG_ON(layer.level < LANDLOCK_MAX_NUM_LAYERS);
BUILD_BUG_ON(layer.access < LANDLOCK_MASK_ACCESS_FS);
}
/* @ruleset must be locked by the caller. */
int landlock_insert_rule(struct landlock_ruleset *const ruleset,
struct landlock_object *const object,
const access_mask_t access)
{
struct landlock_layer layers[] = { {
.access = access,
/* When @level is zero, insert_rule() extends @ruleset. */
.level = 0,
} };
build_check_layer();
return insert_rule(ruleset, object, &layers, ARRAY_SIZE(layers));
}
static inline void get_hierarchy(struct landlock_hierarchy *const hierarchy)
{
if (hierarchy)
refcount_inc(&hierarchy->usage);
}
static void put_hierarchy(struct landlock_hierarchy *hierarchy)
{
while (hierarchy && refcount_dec_and_test(&hierarchy->usage)) {
const struct landlock_hierarchy *const freeme = hierarchy;
hierarchy = hierarchy->parent;
kfree(freeme);
}
}
static int merge_ruleset(struct landlock_ruleset *const dst,
struct landlock_ruleset *const src)
{
struct landlock_rule *walker_rule, *next_rule;
int err = 0;
might_sleep();
/* Should already be checked by landlock_merge_ruleset() */
if (WARN_ON_ONCE(!src))
return 0;
/* Only merge into a domain. */
if (WARN_ON_ONCE(!dst || !dst->hierarchy))
return -EINVAL;
/* Locks @dst first because we are its only owner. */
mutex_lock(&dst->lock);
mutex_lock_nested(&src->lock, SINGLE_DEPTH_NESTING);
/* Stacks the new layer. */
if (WARN_ON_ONCE(src->num_layers != 1 || dst->num_layers < 1)) {
err = -EINVAL;
goto out_unlock;
}
dst->fs_access_masks[dst->num_layers - 1] = src->fs_access_masks[0];
/* Merges the @src tree. */
rbtree_postorder_for_each_entry_safe(walker_rule, next_rule, &src->root,
node) {
struct landlock_layer layers[] = { {
.level = dst->num_layers,
} };
if (WARN_ON_ONCE(walker_rule->num_layers != 1)) {
err = -EINVAL;
goto out_unlock;
}
if (WARN_ON_ONCE(walker_rule->layers[0].level != 0)) {
err = -EINVAL;
goto out_unlock;
}
layers[0].access = walker_rule->layers[0].access;
err = insert_rule(dst, walker_rule->object, &layers,
ARRAY_SIZE(layers));
if (err)
goto out_unlock;
}
out_unlock:
mutex_unlock(&src->lock);
mutex_unlock(&dst->lock);
return err;
}
static int inherit_ruleset(struct landlock_ruleset *const parent,
struct landlock_ruleset *const child)
{
struct landlock_rule *walker_rule, *next_rule;
int err = 0;
might_sleep();
if (!parent)
return 0;
/* Locks @child first because we are its only owner. */
mutex_lock(&child->lock);
mutex_lock_nested(&parent->lock, SINGLE_DEPTH_NESTING);
/* Copies the @parent tree. */
rbtree_postorder_for_each_entry_safe(walker_rule, next_rule,
&parent->root, node) {
err = insert_rule(child, walker_rule->object,
&walker_rule->layers,
walker_rule->num_layers);
if (err)
goto out_unlock;
}
if (WARN_ON_ONCE(child->num_layers <= parent->num_layers)) {
err = -EINVAL;
goto out_unlock;
}
/* Copies the parent layer stack and leaves a space for the new layer. */
memcpy(child->fs_access_masks, parent->fs_access_masks,
flex_array_size(parent, fs_access_masks, parent->num_layers));
if (WARN_ON_ONCE(!parent->hierarchy)) {
err = -EINVAL;
goto out_unlock;
}
get_hierarchy(parent->hierarchy);
child->hierarchy->parent = parent->hierarchy;
out_unlock:
mutex_unlock(&parent->lock);
mutex_unlock(&child->lock);
return err;
}
static void free_ruleset(struct landlock_ruleset *const ruleset)
{
struct landlock_rule *freeme, *next;
might_sleep();
rbtree_postorder_for_each_entry_safe(freeme, next, &ruleset->root, node)
free_rule(freeme);
put_hierarchy(ruleset->hierarchy);
kfree(ruleset);
}
void landlock_put_ruleset(struct landlock_ruleset *const ruleset)
{
might_sleep();
if (ruleset && refcount_dec_and_test(&ruleset->usage))
free_ruleset(ruleset);
}
static void free_ruleset_work(struct work_struct *const work)
{
struct landlock_ruleset *ruleset;
ruleset = container_of(work, struct landlock_ruleset, work_free);
free_ruleset(ruleset);
}
void landlock_put_ruleset_deferred(struct landlock_ruleset *const ruleset)
{
if (ruleset && refcount_dec_and_test(&ruleset->usage)) {
INIT_WORK(&ruleset->work_free, free_ruleset_work);
schedule_work(&ruleset->work_free);
}
}
/**
* landlock_merge_ruleset - Merge a ruleset with a domain
*
* @parent: Parent domain.
* @ruleset: New ruleset to be merged.
*
* Returns the intersection of @parent and @ruleset, or returns @parent if
* @ruleset is empty, or returns a duplicate of @ruleset if @parent is empty.
*/
struct landlock_ruleset *
landlock_merge_ruleset(struct landlock_ruleset *const parent,
struct landlock_ruleset *const ruleset)
{
struct landlock_ruleset *new_dom;
u32 num_layers;
int err;
might_sleep();
if (WARN_ON_ONCE(!ruleset || parent == ruleset))
return ERR_PTR(-EINVAL);
if (parent) {
if (parent->num_layers >= LANDLOCK_MAX_NUM_LAYERS)
return ERR_PTR(-E2BIG);
num_layers = parent->num_layers + 1;
} else {
num_layers = 1;
}
/* Creates a new domain... */
new_dom = create_ruleset(num_layers);
if (IS_ERR(new_dom))
return new_dom;
new_dom->hierarchy =
kzalloc(sizeof(*new_dom->hierarchy), GFP_KERNEL_ACCOUNT);
if (!new_dom->hierarchy) {
err = -ENOMEM;
goto out_put_dom;
}
refcount_set(&new_dom->hierarchy->usage, 1);
/* ...as a child of @parent... */
err = inherit_ruleset(parent, new_dom);
if (err)
goto out_put_dom;
/* ...and including @ruleset. */
err = merge_ruleset(new_dom, ruleset);
if (err)
goto out_put_dom;
return new_dom;
out_put_dom:
landlock_put_ruleset(new_dom);
return ERR_PTR(err);
}
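/*
 * Illustrative user-space sequence leading to this merge (a hedged sketch;
 * error handling is elided and the path is arbitrary):
 * landlock_restrict_self(2) calls landlock_merge_ruleset() with the current
 * domain as @parent.
 *
 *	struct landlock_path_beneath_attr pb = {
 *		.allowed_access = LANDLOCK_ACCESS_FS_READ_FILE,
 *		.parent_fd = open("/usr", O_PATH | O_CLOEXEC),
 *	};
 *	syscall(__NR_landlock_add_rule, ruleset_fd,
 *		LANDLOCK_RULE_PATH_BENEATH, &pb, 0);
 *	prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
 *	syscall(__NR_landlock_restrict_self, ruleset_fd, 0);
 *
 * Each call to landlock_restrict_self(2) stacks one more layer, up to
 * LANDLOCK_MAX_NUM_LAYERS.
 */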
/*
 * The returned rule has the same lifetime as @ruleset.
*/
const struct landlock_rule *
landlock_find_rule(const struct landlock_ruleset *const ruleset,
const struct landlock_object *const object)
{
const struct rb_node *node;
if (!object)
return NULL;
node = ruleset->root.rb_node;
while (node) {
struct landlock_rule *this =
rb_entry(node, struct landlock_rule, node);
if (this->object == object)
return this;
if (this->object < object)
node = node->rb_right;
else
node = node->rb_left;
}
return NULL;
}
| linux-master | security/landlock/ruleset.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Landlock LSM - Credential hooks
*
* Copyright © 2017-2020 Mickaël Salaün <[email protected]>
* Copyright © 2018-2020 ANSSI
*/
#include <linux/cred.h>
#include <linux/lsm_hooks.h>
#include "common.h"
#include "cred.h"
#include "ruleset.h"
#include "setup.h"
static int hook_cred_prepare(struct cred *const new,
const struct cred *const old, const gfp_t gfp)
{
struct landlock_ruleset *const old_dom = landlock_cred(old)->domain;
if (old_dom) {
landlock_get_ruleset(old_dom);
landlock_cred(new)->domain = old_dom;
}
return 0;
}
static void hook_cred_free(struct cred *const cred)
{
struct landlock_ruleset *const dom = landlock_cred(cred)->domain;
if (dom)
landlock_put_ruleset_deferred(dom);
}
static struct security_hook_list landlock_hooks[] __ro_after_init = {
LSM_HOOK_INIT(cred_prepare, hook_cred_prepare),
LSM_HOOK_INIT(cred_free, hook_cred_free),
};
__init void landlock_add_cred_hooks(void)
{
security_add_hooks(landlock_hooks, ARRAY_SIZE(landlock_hooks),
LANDLOCK_NAME);
}
| linux-master | security/landlock/cred.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Yama Linux Security Module
*
* Author: Kees Cook <[email protected]>
*
* Copyright (C) 2010 Canonical, Ltd.
* Copyright (C) 2011 The Chromium OS Authors.
*/
#include <linux/lsm_hooks.h>
#include <linux/sysctl.h>
#include <linux/ptrace.h>
#include <linux/prctl.h>
#include <linux/ratelimit.h>
#include <linux/workqueue.h>
#include <linux/string_helpers.h>
#include <linux/task_work.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#define YAMA_SCOPE_DISABLED 0
#define YAMA_SCOPE_RELATIONAL 1
#define YAMA_SCOPE_CAPABILITY 2
#define YAMA_SCOPE_NO_ATTACH 3
static int ptrace_scope = YAMA_SCOPE_RELATIONAL;
/* describe a ptrace relationship for potential exception */
struct ptrace_relation {
struct task_struct *tracer;
struct task_struct *tracee;
bool invalid;
struct list_head node;
struct rcu_head rcu;
};
static LIST_HEAD(ptracer_relations);
static DEFINE_SPINLOCK(ptracer_relations_lock);
static void yama_relation_cleanup(struct work_struct *work);
static DECLARE_WORK(yama_relation_work, yama_relation_cleanup);
struct access_report_info {
struct callback_head work;
const char *access;
struct task_struct *target;
struct task_struct *agent;
};
static void __report_access(struct callback_head *work)
{
struct access_report_info *info =
container_of(work, struct access_report_info, work);
char *target_cmd, *agent_cmd;
target_cmd = kstrdup_quotable_cmdline(info->target, GFP_KERNEL);
agent_cmd = kstrdup_quotable_cmdline(info->agent, GFP_KERNEL);
pr_notice_ratelimited(
"ptrace %s of \"%s\"[%d] was attempted by \"%s\"[%d]\n",
info->access, target_cmd, info->target->pid, agent_cmd,
info->agent->pid);
kfree(agent_cmd);
kfree(target_cmd);
put_task_struct(info->agent);
put_task_struct(info->target);
kfree(info);
}
/* defers execution because cmdline access can sleep */
static void report_access(const char *access, struct task_struct *target,
struct task_struct *agent)
{
struct access_report_info *info;
char agent_comm[sizeof(agent->comm)];
assert_spin_locked(&target->alloc_lock); /* for target->comm */
if (current->flags & PF_KTHREAD) {
/* I don't think kthreads call task_work_run() before exiting.
* Imagine angry ranting about procfs here.
*/
pr_notice_ratelimited(
"ptrace %s of \"%s\"[%d] was attempted by \"%s\"[%d]\n",
access, target->comm, target->pid,
get_task_comm(agent_comm, agent), agent->pid);
return;
}
info = kmalloc(sizeof(*info), GFP_ATOMIC);
if (!info)
return;
init_task_work(&info->work, __report_access);
get_task_struct(target);
get_task_struct(agent);
info->access = access;
info->target = target;
info->agent = agent;
if (task_work_add(current, &info->work, TWA_RESUME) == 0)
return; /* success */
WARN(1, "report_access called from exiting task");
put_task_struct(target);
put_task_struct(agent);
kfree(info);
}
/**
* yama_relation_cleanup - remove invalid entries from the relation list
 * @work: unused
*/
static void yama_relation_cleanup(struct work_struct *work)
{
struct ptrace_relation *relation;
spin_lock(&ptracer_relations_lock);
rcu_read_lock();
list_for_each_entry_rcu(relation, &ptracer_relations, node) {
if (relation->invalid) {
list_del_rcu(&relation->node);
kfree_rcu(relation, rcu);
}
}
rcu_read_unlock();
spin_unlock(&ptracer_relations_lock);
}
/**
* yama_ptracer_add - add/replace an exception for this tracer/tracee pair
* @tracer: the task_struct of the process doing the ptrace
* @tracee: the task_struct of the process to be ptraced
*
* Each tracee can have, at most, one tracer registered. Each time this
* is called, the prior registered tracer will be replaced for the tracee.
*
* Returns 0 if relationship was added, -ve on error.
*/
static int yama_ptracer_add(struct task_struct *tracer,
struct task_struct *tracee)
{
struct ptrace_relation *relation, *added;
added = kmalloc(sizeof(*added), GFP_KERNEL);
if (!added)
return -ENOMEM;
added->tracee = tracee;
added->tracer = tracer;
added->invalid = false;
spin_lock(&ptracer_relations_lock);
rcu_read_lock();
list_for_each_entry_rcu(relation, &ptracer_relations, node) {
if (relation->invalid)
continue;
if (relation->tracee == tracee) {
list_replace_rcu(&relation->node, &added->node);
kfree_rcu(relation, rcu);
goto out;
}
}
list_add_rcu(&added->node, &ptracer_relations);
out:
rcu_read_unlock();
spin_unlock(&ptracer_relations_lock);
return 0;
}
/**
* yama_ptracer_del - remove exceptions related to the given tasks
* @tracer: remove any relation where tracer task matches
* @tracee: remove any relation where tracee task matches
*/
static void yama_ptracer_del(struct task_struct *tracer,
struct task_struct *tracee)
{
struct ptrace_relation *relation;
bool marked = false;
rcu_read_lock();
list_for_each_entry_rcu(relation, &ptracer_relations, node) {
if (relation->invalid)
continue;
if (relation->tracee == tracee ||
(tracer && relation->tracer == tracer)) {
relation->invalid = true;
marked = true;
}
}
rcu_read_unlock();
if (marked)
schedule_work(&yama_relation_work);
}
/**
* yama_task_free - check for task_pid to remove from exception list
* @task: task being removed
*/
static void yama_task_free(struct task_struct *task)
{
yama_ptracer_del(task, task);
}
/**
* yama_task_prctl - check for Yama-specific prctl operations
* @option: operation
* @arg2: argument
* @arg3: argument
* @arg4: argument
* @arg5: argument
*
* Return 0 on success, -ve on error. -ENOSYS is returned when Yama
* does not handle the given option.
*/
static int yama_task_prctl(int option, unsigned long arg2, unsigned long arg3,
unsigned long arg4, unsigned long arg5)
{
int rc = -ENOSYS;
struct task_struct *myself = current;
switch (option) {
case PR_SET_PTRACER:
/* Since a thread can call prctl(), find the group leader
* before calling _add() or _del() on it, since we want
* process-level granularity of control. The tracer group
* leader checking is handled later when walking the ancestry
* at the time of PTRACE_ATTACH check.
*/
rcu_read_lock();
if (!thread_group_leader(myself))
myself = rcu_dereference(myself->group_leader);
get_task_struct(myself);
rcu_read_unlock();
if (arg2 == 0) {
yama_ptracer_del(NULL, myself);
rc = 0;
} else if (arg2 == PR_SET_PTRACER_ANY || (int)arg2 == -1) {
rc = yama_ptracer_add(NULL, myself);
} else {
struct task_struct *tracer;
tracer = find_get_task_by_vpid(arg2);
if (!tracer) {
rc = -EINVAL;
} else {
rc = yama_ptracer_add(tracer, myself);
put_task_struct(tracer);
}
}
put_task_struct(myself);
break;
}
return rc;
}
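/*
 * Illustrative user-space usage of the prctl handled above (a sketch; the
 * debugger_pid value is hypothetical):
 *
 *	prctl(PR_SET_PTRACER, debugger_pid, 0, 0, 0);	// allow one tracer
 *	prctl(PR_SET_PTRACER, PR_SET_PTRACER_ANY, 0, 0, 0); // allow any tracer
 *	prctl(PR_SET_PTRACER, 0, 0, 0, 0);		// clear the exception
 *
 * The exception applies to the caller's whole thread group and only matters
 * under YAMA_SCOPE_RELATIONAL.
 */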
/**
* task_is_descendant - walk up a process family tree looking for a match
* @parent: the process to compare against while walking up from child
* @child: the process to start from while looking upwards for parent
*
* Returns 1 if child is a descendant of parent, 0 if not.
*/
static int task_is_descendant(struct task_struct *parent,
struct task_struct *child)
{
int rc = 0;
struct task_struct *walker = child;
if (!parent || !child)
return 0;
rcu_read_lock();
if (!thread_group_leader(parent))
parent = rcu_dereference(parent->group_leader);
while (walker->pid > 0) {
if (!thread_group_leader(walker))
walker = rcu_dereference(walker->group_leader);
if (walker == parent) {
rc = 1;
break;
}
walker = rcu_dereference(walker->real_parent);
}
rcu_read_unlock();
return rc;
}
/**
* ptracer_exception_found - tracer registered as exception for this tracee
* @tracer: the task_struct of the process attempting ptrace
* @tracee: the task_struct of the process to be ptraced
*
* Returns 1 if tracer has a ptracer exception ancestor for tracee.
*/
static int ptracer_exception_found(struct task_struct *tracer,
struct task_struct *tracee)
{
int rc = 0;
struct ptrace_relation *relation;
struct task_struct *parent = NULL;
bool found = false;
rcu_read_lock();
/*
* If there's already an active tracing relationship, then make an
* exception for the sake of other accesses, like process_vm_rw().
*/
parent = ptrace_parent(tracee);
if (parent != NULL && same_thread_group(parent, tracer)) {
rc = 1;
goto unlock;
}
/* Look for a PR_SET_PTRACER relationship. */
if (!thread_group_leader(tracee))
tracee = rcu_dereference(tracee->group_leader);
list_for_each_entry_rcu(relation, &ptracer_relations, node) {
if (relation->invalid)
continue;
if (relation->tracee == tracee) {
parent = relation->tracer;
found = true;
break;
}
}
if (found && (parent == NULL || task_is_descendant(parent, tracer)))
rc = 1;
unlock:
rcu_read_unlock();
return rc;
}
/**
* yama_ptrace_access_check - validate PTRACE_ATTACH calls
* @child: task that current task is attempting to ptrace
* @mode: ptrace attach mode
*
* Returns 0 if following the ptrace is allowed, -ve on error.
*/
static int yama_ptrace_access_check(struct task_struct *child,
unsigned int mode)
{
int rc = 0;
/* require ptrace target be a child of ptracer on attach */
if (mode & PTRACE_MODE_ATTACH) {
switch (ptrace_scope) {
case YAMA_SCOPE_DISABLED:
/* No additional restrictions. */
break;
case YAMA_SCOPE_RELATIONAL:
rcu_read_lock();
if (!pid_alive(child))
rc = -EPERM;
if (!rc && !task_is_descendant(current, child) &&
!ptracer_exception_found(current, child) &&
!ns_capable(__task_cred(child)->user_ns, CAP_SYS_PTRACE))
rc = -EPERM;
rcu_read_unlock();
break;
case YAMA_SCOPE_CAPABILITY:
rcu_read_lock();
if (!ns_capable(__task_cred(child)->user_ns, CAP_SYS_PTRACE))
rc = -EPERM;
rcu_read_unlock();
break;
case YAMA_SCOPE_NO_ATTACH:
default:
rc = -EPERM;
break;
}
}
if (rc && (mode & PTRACE_MODE_NOAUDIT) == 0)
report_access("attach", child, current);
return rc;
}
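/*
 * Illustrative effect of the scopes checked above, from user space (a
 * sketch): with the module default kernel.yama.ptrace_scope=1 (RELATIONAL),
 * "gdb -p <pid>" from an unrelated shell fails with EPERM unless the target
 * set a PR_SET_PTRACER exception, the debugger is an ancestor of the target,
 * or it holds CAP_SYS_PTRACE. For example:
 *
 *	# sysctl kernel.yama.ptrace_scope=2
 *
 * restricts PTRACE_ATTACH to CAP_SYS_PTRACE holders, and =3 forbids attach
 * entirely; once the maximum value is set, the sysctl handler locks it.
 */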
/**
* yama_ptrace_traceme - validate PTRACE_TRACEME calls
* @parent: task that will become the ptracer of the current task
*
* Returns 0 if following the ptrace is allowed, -ve on error.
*/
static int yama_ptrace_traceme(struct task_struct *parent)
{
int rc = 0;
/* Only disallow PTRACE_TRACEME on more aggressive settings. */
switch (ptrace_scope) {
case YAMA_SCOPE_CAPABILITY:
if (!has_ns_capability(parent, current_user_ns(), CAP_SYS_PTRACE))
rc = -EPERM;
break;
case YAMA_SCOPE_NO_ATTACH:
rc = -EPERM;
break;
}
if (rc) {
task_lock(current);
report_access("traceme", current, parent);
task_unlock(current);
}
return rc;
}
static struct security_hook_list yama_hooks[] __ro_after_init = {
LSM_HOOK_INIT(ptrace_access_check, yama_ptrace_access_check),
LSM_HOOK_INIT(ptrace_traceme, yama_ptrace_traceme),
LSM_HOOK_INIT(task_prctl, yama_task_prctl),
LSM_HOOK_INIT(task_free, yama_task_free),
};
#ifdef CONFIG_SYSCTL
static int yama_dointvec_minmax(struct ctl_table *table, int write,
void *buffer, size_t *lenp, loff_t *ppos)
{
struct ctl_table table_copy;
if (write && !capable(CAP_SYS_PTRACE))
return -EPERM;
/* Lock the max value if it ever gets set. */
table_copy = *table;
if (*(int *)table_copy.data == *(int *)table_copy.extra2)
table_copy.extra1 = table_copy.extra2;
return proc_dointvec_minmax(&table_copy, write, buffer, lenp, ppos);
}
static int max_scope = YAMA_SCOPE_NO_ATTACH;
static struct ctl_table yama_sysctl_table[] = {
{
.procname = "ptrace_scope",
.data = &ptrace_scope,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = yama_dointvec_minmax,
.extra1 = SYSCTL_ZERO,
.extra2 = &max_scope,
},
{ }
};
static void __init yama_init_sysctl(void)
{
if (!register_sysctl("kernel/yama", yama_sysctl_table))
panic("Yama: sysctl registration failed.\n");
}
#else
static inline void yama_init_sysctl(void) { }
#endif /* CONFIG_SYSCTL */
static int __init yama_init(void)
{
pr_info("Yama: becoming mindful.\n");
security_add_hooks(yama_hooks, ARRAY_SIZE(yama_hooks), "yama");
yama_init_sysctl();
return 0;
}
DEFINE_LSM(yama) = {
.name = "yama",
.init = yama_init,
};
| linux-master | security/yama/yama_lsm.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* AppArmor security module
*
* This file contains AppArmor dfa based regular expression matching engine
*
* Copyright (C) 1998-2008 Novell/SUSE
* Copyright 2009-2012 Canonical Ltd.
*/
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/err.h>
#include <linux/kref.h>
#include "include/lib.h"
#include "include/match.h"
#define base_idx(X) ((X) & 0xffffff)
static char nulldfa_src[] = {
#include "nulldfa.in"
};
struct aa_dfa *nulldfa;
static char stacksplitdfa_src[] = {
#include "stacksplitdfa.in"
};
struct aa_dfa *stacksplitdfa;
int __init aa_setup_dfa_engine(void)
{
int error;
nulldfa = aa_dfa_unpack(nulldfa_src, sizeof(nulldfa_src),
TO_ACCEPT1_FLAG(YYTD_DATA32) |
TO_ACCEPT2_FLAG(YYTD_DATA32));
if (IS_ERR(nulldfa)) {
error = PTR_ERR(nulldfa);
nulldfa = NULL;
return error;
}
stacksplitdfa = aa_dfa_unpack(stacksplitdfa_src,
sizeof(stacksplitdfa_src),
TO_ACCEPT1_FLAG(YYTD_DATA32) |
TO_ACCEPT2_FLAG(YYTD_DATA32));
if (IS_ERR(stacksplitdfa)) {
aa_put_dfa(nulldfa);
nulldfa = NULL;
error = PTR_ERR(stacksplitdfa);
stacksplitdfa = NULL;
return error;
}
return 0;
}
void __init aa_teardown_dfa_engine(void)
{
aa_put_dfa(stacksplitdfa);
aa_put_dfa(nulldfa);
}
/**
* unpack_table - unpack a dfa table (one of accept, default, base, next check)
* @blob: data to unpack (NOT NULL)
* @bsize: size of blob
*
* Returns: pointer to table else NULL on failure
*
* NOTE: must be freed by kvfree (not kfree)
*/
static struct table_header *unpack_table(char *blob, size_t bsize)
{
struct table_header *table = NULL;
struct table_header th;
size_t tsize;
if (bsize < sizeof(struct table_header))
goto out;
/* loaded td_id's start at 1, subtract 1 now to avoid doing
* it every time we use td_id as an index
*/
th.td_id = be16_to_cpu(*(__be16 *) (blob)) - 1;
if (th.td_id > YYTD_ID_MAX)
goto out;
th.td_flags = be16_to_cpu(*(__be16 *) (blob + 2));
th.td_lolen = be32_to_cpu(*(__be32 *) (blob + 8));
blob += sizeof(struct table_header);
if (!(th.td_flags == YYTD_DATA16 || th.td_flags == YYTD_DATA32 ||
th.td_flags == YYTD_DATA8))
goto out;
/* if we have a table it must have some entries */
if (th.td_lolen == 0)
goto out;
tsize = table_size(th.td_lolen, th.td_flags);
if (bsize < tsize)
goto out;
table = kvzalloc(tsize, GFP_KERNEL);
if (table) {
table->td_id = th.td_id;
table->td_flags = th.td_flags;
table->td_lolen = th.td_lolen;
if (th.td_flags == YYTD_DATA8)
UNPACK_ARRAY(table->td_data, blob, th.td_lolen,
u8, u8, byte_to_byte);
else if (th.td_flags == YYTD_DATA16)
UNPACK_ARRAY(table->td_data, blob, th.td_lolen,
u16, __be16, be16_to_cpu);
else if (th.td_flags == YYTD_DATA32)
UNPACK_ARRAY(table->td_data, blob, th.td_lolen,
u32, __be32, be32_to_cpu);
else
goto fail;
/* if table was vmalloced make sure the page tables are synced
* before it is used, as it goes live to all cpus.
*/
if (is_vmalloc_addr(table))
vm_unmap_aliases();
}
out:
return table;
fail:
kvfree(table);
return NULL;
}
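/*
 * Illustrative layout of a serialized table header, as consumed above (a
 * sketch derived from the offsets used by unpack_table(); all fields are
 * big-endian):
 *
 *	offset 0: __be16 td_id    (1-based on disk, stored 0-based)
 *	offset 2: __be16 td_flags (YYTD_DATA8, YYTD_DATA16 or YYTD_DATA32)
 *	offset 8: __be32 td_lolen (number of entries, must be non-zero)
 *
 * The entries follow the fixed-size header and are converted to host
 * endianness while being copied into the kvzalloc()ed table.
 */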
/**
* verify_table_headers - verify that the tables headers are as expected
 * @tables: array of dfa tables to check (NOT NULL)
 * @flags: flags controlling what type of accept tables are acceptable
*
* Assumes dfa has gone through the first pass verification done by unpacking
 * NOTE: this does not validate accept table values
*
* Returns: %0 else error code on failure to verify
*/
static int verify_table_headers(struct table_header **tables, int flags)
{
size_t state_count, trans_count;
int error = -EPROTO;
/* check that required tables exist */
if (!(tables[YYTD_ID_DEF] && tables[YYTD_ID_BASE] &&
tables[YYTD_ID_NXT] && tables[YYTD_ID_CHK]))
goto out;
/* accept.size == default.size == base.size */
state_count = tables[YYTD_ID_BASE]->td_lolen;
if (ACCEPT1_FLAGS(flags)) {
if (!tables[YYTD_ID_ACCEPT])
goto out;
if (state_count != tables[YYTD_ID_ACCEPT]->td_lolen)
goto out;
}
if (ACCEPT2_FLAGS(flags)) {
if (!tables[YYTD_ID_ACCEPT2])
goto out;
if (state_count != tables[YYTD_ID_ACCEPT2]->td_lolen)
goto out;
}
if (state_count != tables[YYTD_ID_DEF]->td_lolen)
goto out;
/* next.size == chk.size */
trans_count = tables[YYTD_ID_NXT]->td_lolen;
if (trans_count != tables[YYTD_ID_CHK]->td_lolen)
goto out;
/* if equivalence classes then its table size must be 256 */
if (tables[YYTD_ID_EC] && tables[YYTD_ID_EC]->td_lolen != 256)
goto out;
error = 0;
out:
return error;
}
/**
* verify_dfa - verify that transitions and states in the tables are in bounds.
* @dfa: dfa to test (NOT NULL)
*
* Assumes dfa has gone through the first pass verification done by unpacking
 * NOTE: this does not validate accept table values
*
* Returns: %0 else error code on failure to verify
*/
static int verify_dfa(struct aa_dfa *dfa)
{
size_t i, state_count, trans_count;
int error = -EPROTO;
state_count = dfa->tables[YYTD_ID_BASE]->td_lolen;
trans_count = dfa->tables[YYTD_ID_NXT]->td_lolen;
if (state_count == 0)
goto out;
for (i = 0; i < state_count; i++) {
if (!(BASE_TABLE(dfa)[i] & MATCH_FLAG_DIFF_ENCODE) &&
(DEFAULT_TABLE(dfa)[i] >= state_count))
goto out;
if (BASE_TABLE(dfa)[i] & MATCH_FLAGS_INVALID) {
pr_err("AppArmor DFA state with invalid match flags");
goto out;
}
if ((BASE_TABLE(dfa)[i] & MATCH_FLAG_DIFF_ENCODE)) {
if (!(dfa->flags & YYTH_FLAG_DIFF_ENCODE)) {
pr_err("AppArmor DFA diff encoded transition state without header flag");
goto out;
}
}
if ((BASE_TABLE(dfa)[i] & MATCH_FLAG_OOB_TRANSITION)) {
if (base_idx(BASE_TABLE(dfa)[i]) < dfa->max_oob) {
pr_err("AppArmor DFA out of bad transition out of range");
goto out;
}
if (!(dfa->flags & YYTH_FLAG_OOB_TRANS)) {
pr_err("AppArmor DFA out of bad transition state without header flag");
goto out;
}
}
if (base_idx(BASE_TABLE(dfa)[i]) + 255 >= trans_count) {
pr_err("AppArmor DFA next/check upper bounds error\n");
goto out;
}
}
for (i = 0; i < trans_count; i++) {
if (NEXT_TABLE(dfa)[i] >= state_count)
goto out;
if (CHECK_TABLE(dfa)[i] >= state_count)
goto out;
}
/* Now that all the other tables are verified, verify diffencoding */
for (i = 0; i < state_count; i++) {
size_t j, k;
for (j = i;
(BASE_TABLE(dfa)[j] & MATCH_FLAG_DIFF_ENCODE) &&
!(BASE_TABLE(dfa)[j] & MARK_DIFF_ENCODE);
j = k) {
k = DEFAULT_TABLE(dfa)[j];
if (j == k)
goto out;
if (k < j)
break; /* already verified */
BASE_TABLE(dfa)[j] |= MARK_DIFF_ENCODE;
}
}
error = 0;
out:
return error;
}
/**
* dfa_free - free a dfa allocated by aa_dfa_unpack
* @dfa: the dfa to free (MAYBE NULL)
*
* Requires: reference count to dfa == 0
*/
static void dfa_free(struct aa_dfa *dfa)
{
if (dfa) {
int i;
for (i = 0; i < ARRAY_SIZE(dfa->tables); i++) {
kvfree(dfa->tables[i]);
dfa->tables[i] = NULL;
}
kfree(dfa);
}
}
/**
* aa_dfa_free_kref - free aa_dfa by kref (called by aa_put_dfa)
 * @kref: kref callback for freeing of a dfa (NOT NULL)
*/
void aa_dfa_free_kref(struct kref *kref)
{
struct aa_dfa *dfa = container_of(kref, struct aa_dfa, count);
dfa_free(dfa);
}
/**
* aa_dfa_unpack - unpack the binary tables of a serialized dfa
* @blob: aligned serialized stream of data to unpack (NOT NULL)
* @size: size of data to unpack
* @flags: flags controlling what type of accept tables are acceptable
*
* Unpack a dfa that has been serialized. To find information on the dfa
* format look in Documentation/admin-guide/LSM/apparmor.rst
 * Assumes the dfa @blob stream has been aligned on an 8 byte boundary
*
* Returns: an unpacked dfa ready for matching or ERR_PTR on failure
*/
struct aa_dfa *aa_dfa_unpack(void *blob, size_t size, int flags)
{
int hsize;
int error = -ENOMEM;
char *data = blob;
struct table_header *table = NULL;
struct aa_dfa *dfa = kzalloc(sizeof(struct aa_dfa), GFP_KERNEL);
if (!dfa)
goto fail;
kref_init(&dfa->count);
error = -EPROTO;
/* get dfa table set header */
if (size < sizeof(struct table_set_header))
goto fail;
if (ntohl(*(__be32 *) data) != YYTH_MAGIC)
goto fail;
hsize = ntohl(*(__be32 *) (data + 4));
if (size < hsize)
goto fail;
dfa->flags = ntohs(*(__be16 *) (data + 12));
if (dfa->flags & ~(YYTH_FLAGS))
goto fail;
/*
* TODO: needed for dfa to support more than 1 oob
* if (dfa->flags & YYTH_FLAGS_OOB_TRANS) {
* if (hsize < 16 + 4)
* goto fail;
* dfa->max_oob = ntol(*(__be32 *) (data + 16));
* if (dfa->max <= MAX_OOB_SUPPORTED) {
* pr_err("AppArmor DFA OOB greater than supported\n");
* goto fail;
* }
* }
*/
dfa->max_oob = 1;
data += hsize;
size -= hsize;
while (size > 0) {
table = unpack_table(data, size);
if (!table)
goto fail;
switch (table->td_id) {
case YYTD_ID_ACCEPT:
if (!(table->td_flags & ACCEPT1_FLAGS(flags)))
goto fail;
break;
case YYTD_ID_ACCEPT2:
if (!(table->td_flags & ACCEPT2_FLAGS(flags)))
goto fail;
break;
case YYTD_ID_BASE:
if (table->td_flags != YYTD_DATA32)
goto fail;
break;
case YYTD_ID_DEF:
case YYTD_ID_NXT:
case YYTD_ID_CHK:
if (table->td_flags != YYTD_DATA16)
goto fail;
break;
case YYTD_ID_EC:
if (table->td_flags != YYTD_DATA8)
goto fail;
break;
default:
goto fail;
}
/* check for duplicate table entry */
if (dfa->tables[table->td_id])
goto fail;
dfa->tables[table->td_id] = table;
data += table_size(table->td_lolen, table->td_flags);
size -= table_size(table->td_lolen, table->td_flags);
table = NULL;
}
error = verify_table_headers(dfa->tables, flags);
if (error)
goto fail;
if (flags & DFA_FLAG_VERIFY_STATES) {
error = verify_dfa(dfa);
if (error)
goto fail;
}
return dfa;
fail:
kvfree(table);
dfa_free(dfa);
return ERR_PTR(error);
}
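/*
 * Illustrative sketch (not part of the kernel source): a caller that owns a
 * serialized dfa blob would typically unpack and verify it roughly as below.
 * The accept-table flag helpers and DFA_FLAG_VERIFY_STATES come from the
 * AppArmor match.h header; treat the exact flag combination as an assumption.
 *
 *	struct aa_dfa *dfa;
 *
 *	dfa = aa_dfa_unpack(blob, size,
 *			    TO_ACCEPT1_FLAG(YYTD_DATA32) |
 *			    DFA_FLAG_VERIFY_STATES);
 *	if (IS_ERR(dfa))
 *		return PTR_ERR(dfa);
 *	... use aa_dfa_match()/aa_dfa_next() on dfa ...
 *	aa_put_dfa(dfa);
 */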
#define match_char(state, def, base, next, check, C) \
do { \
u32 b = (base)[(state)]; \
unsigned int pos = base_idx(b) + (C); \
if ((check)[pos] != (state)) { \
(state) = (def)[(state)]; \
if (b & MATCH_FLAG_DIFF_ENCODE) \
continue; \
break; \
} \
(state) = (next)[pos]; \
break; \
} while (1)
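/*
 * Note on the tables used by match_char(): they form a comb-compressed
 * transition matrix. For input byte C in state S the candidate slot is
 * base_idx(base[S]) + C; the slot belongs to S only if check[slot] == S, in
 * which case next[slot] is the new state. Otherwise the walk falls back to
 * the default state def[S], and when S is diff-encoded
 * (MATCH_FLAG_DIFF_ENCODE) the same input byte is retried from that default
 * state before the transition is given up.
 */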
/**
* aa_dfa_match_len - traverse @dfa to find state @str stops at
* @dfa: the dfa to match @str against (NOT NULL)
* @start: the state of the dfa to start matching in
* @str: the string of bytes to match against the dfa (NOT NULL)
* @len: length of the string of bytes to match
*
* aa_dfa_match_len will match @str against the dfa and return the state it
* finished matching in. The final state can be used to look up the accepting
* label, or as the start state of a continuing match.
*
 * This function will happily match against the 0 byte and only finishes
* when @len input is consumed.
*
* Returns: final state reached after input is consumed
*/
aa_state_t aa_dfa_match_len(struct aa_dfa *dfa, aa_state_t start,
const char *str, int len)
{
u16 *def = DEFAULT_TABLE(dfa);
u32 *base = BASE_TABLE(dfa);
u16 *next = NEXT_TABLE(dfa);
u16 *check = CHECK_TABLE(dfa);
aa_state_t state = start;
if (state == DFA_NOMATCH)
return DFA_NOMATCH;
/* current state is <state>, matching character *str */
if (dfa->tables[YYTD_ID_EC]) {
/* Equivalence class table defined */
u8 *equiv = EQUIV_TABLE(dfa);
for (; len; len--)
match_char(state, def, base, next, check,
equiv[(u8) *str++]);
} else {
/* default is direct to next state */
for (; len; len--)
match_char(state, def, base, next, check, (u8) *str++);
}
return state;
}
/**
* aa_dfa_match - traverse @dfa to find state @str stops at
* @dfa: the dfa to match @str against (NOT NULL)
* @start: the state of the dfa to start matching in
* @str: the null terminated string of bytes to match against the dfa (NOT NULL)
*
* aa_dfa_match will match @str against the dfa and return the state it
* finished matching in. The final state can be used to look up the accepting
* label, or as the start state of a continuing match.
*
* Returns: final state reached after input is consumed
*/
aa_state_t aa_dfa_match(struct aa_dfa *dfa, aa_state_t start, const char *str)
{
u16 *def = DEFAULT_TABLE(dfa);
u32 *base = BASE_TABLE(dfa);
u16 *next = NEXT_TABLE(dfa);
u16 *check = CHECK_TABLE(dfa);
aa_state_t state = start;
if (state == DFA_NOMATCH)
return DFA_NOMATCH;
/* current state is <state>, matching character *str */
if (dfa->tables[YYTD_ID_EC]) {
/* Equivalence class table defined */
u8 *equiv = EQUIV_TABLE(dfa);
/* default is direct to next state */
while (*str)
match_char(state, def, base, next, check,
equiv[(u8) *str++]);
} else {
/* default is direct to next state */
while (*str)
match_char(state, def, base, next, check, (u8) *str++);
}
return state;
}
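/*
 * Illustrative sketch (not from the kernel source): matching a path and then
 * checking whether the final state accepts. ACCEPT_TABLE() and DFA_START are
 * taken from the AppArmor match.h header; using DFA_START here is an
 * assumption, real callers normally start from a per-class start state.
 *
 *	aa_state_t state = aa_dfa_match(dfa, DFA_START, "/etc/passwd");
 *	u32 perms = ACCEPT_TABLE(dfa)[state];
 *	if (perms)
 *		... the string reached an accepting state ...
 */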
/**
* aa_dfa_next - step one character to the next state in the dfa
* @dfa: the dfa to traverse (NOT NULL)
* @state: the state to start in
* @c: the input character to transition on
*
 * aa_dfa_next will step through the dfa by one input character @c
 *
 * Returns: state reached after input @c
*/
aa_state_t aa_dfa_next(struct aa_dfa *dfa, aa_state_t state, const char c)
{
u16 *def = DEFAULT_TABLE(dfa);
u32 *base = BASE_TABLE(dfa);
u16 *next = NEXT_TABLE(dfa);
u16 *check = CHECK_TABLE(dfa);
/* current state is <state>, matching character *str */
if (dfa->tables[YYTD_ID_EC]) {
/* Equivalence class table defined */
u8 *equiv = EQUIV_TABLE(dfa);
match_char(state, def, base, next, check, equiv[(u8) c]);
} else
match_char(state, def, base, next, check, (u8) c);
return state;
}
aa_state_t aa_dfa_outofband_transition(struct aa_dfa *dfa, aa_state_t state)
{
u16 *def = DEFAULT_TABLE(dfa);
u32 *base = BASE_TABLE(dfa);
u16 *next = NEXT_TABLE(dfa);
u16 *check = CHECK_TABLE(dfa);
u32 b = (base)[(state)];
if (!(b & MATCH_FLAG_OOB_TRANSITION))
return DFA_NOMATCH;
/* No Equivalence class remapping for outofband transitions */
match_char(state, def, base, next, check, -1);
return state;
}
/**
* aa_dfa_match_until - traverse @dfa until accept state or end of input
* @dfa: the dfa to match @str against (NOT NULL)
* @start: the state of the dfa to start matching in
* @str: the null terminated string of bytes to match against the dfa (NOT NULL)
* @retpos: first character in str after match OR end of string
*
* aa_dfa_match will match @str against the dfa and return the state it
* finished matching in. The final state can be used to look up the accepting
* label, or as the start state of a continuing match.
*
* Returns: final state reached after input is consumed
*/
aa_state_t aa_dfa_match_until(struct aa_dfa *dfa, aa_state_t start,
const char *str, const char **retpos)
{
u16 *def = DEFAULT_TABLE(dfa);
u32 *base = BASE_TABLE(dfa);
u16 *next = NEXT_TABLE(dfa);
u16 *check = CHECK_TABLE(dfa);
u32 *accept = ACCEPT_TABLE(dfa);
aa_state_t state = start, pos;
if (state == DFA_NOMATCH)
return DFA_NOMATCH;
/* current state is <state>, matching character *str */
if (dfa->tables[YYTD_ID_EC]) {
/* Equivalence class table defined */
u8 *equiv = EQUIV_TABLE(dfa);
/* default is direct to next state */
while (*str) {
pos = base_idx(base[state]) + equiv[(u8) *str++];
if (check[pos] == state)
state = next[pos];
else
state = def[state];
if (accept[state])
break;
}
} else {
/* default is direct to next state */
while (*str) {
pos = base_idx(base[state]) + (u8) *str++;
if (check[pos] == state)
state = next[pos];
else
state = def[state];
if (accept[state])
break;
}
}
*retpos = str;
return state;
}
/**
* aa_dfa_matchn_until - traverse @dfa until accept or @n bytes consumed
* @dfa: the dfa to match @str against (NOT NULL)
* @start: the state of the dfa to start matching in
* @str: the string of bytes to match against the dfa (NOT NULL)
* @n: length of the string of bytes to match
* @retpos: first character in str after match OR str + n
*
* aa_dfa_match_len will match @str against the dfa and return the state it
* finished matching in. The final state can be used to look up the accepting
* label, or as the start state of a continuing match.
*
 * This function will happily match against the 0 byte and only finishes
* when @n input is consumed.
*
* Returns: final state reached after input is consumed
*/
aa_state_t aa_dfa_matchn_until(struct aa_dfa *dfa, aa_state_t start,
const char *str, int n, const char **retpos)
{
u16 *def = DEFAULT_TABLE(dfa);
u32 *base = BASE_TABLE(dfa);
u16 *next = NEXT_TABLE(dfa);
u16 *check = CHECK_TABLE(dfa);
u32 *accept = ACCEPT_TABLE(dfa);
aa_state_t state = start, pos;
*retpos = NULL;
if (state == DFA_NOMATCH)
return DFA_NOMATCH;
/* current state is <state>, matching character *str */
if (dfa->tables[YYTD_ID_EC]) {
/* Equivalence class table defined */
u8 *equiv = EQUIV_TABLE(dfa);
/* default is direct to next state */
for (; n; n--) {
pos = base_idx(base[state]) + equiv[(u8) *str++];
if (check[pos] == state)
state = next[pos];
else
state = def[state];
if (accept[state])
break;
}
} else {
/* default is direct to next state */
for (; n; n--) {
pos = base_idx(base[state]) + (u8) *str++;
if (check[pos] == state)
state = next[pos];
else
state = def[state];
if (accept[state])
break;
}
}
*retpos = str;
return state;
}
#define inc_wb_pos(wb) \
do { \
wb->pos = (wb->pos + 1) & (WB_HISTORY_SIZE - 1); \
wb->len = (wb->len + 1) & (WB_HISTORY_SIZE - 1); \
} while (0)
/* For DFAs that don't support extended tagging of states */
static bool is_loop(struct match_workbuf *wb, aa_state_t state,
unsigned int *adjust)
{
aa_state_t pos = wb->pos;
aa_state_t i;
if (wb->history[pos] < state)
return false;
for (i = 0; i <= wb->len; i++) {
if (wb->history[pos] == state) {
*adjust = i;
return true;
}
if (pos == 0)
pos = WB_HISTORY_SIZE;
pos--;
}
*adjust = i;
return true;
}
static aa_state_t leftmatch_fb(struct aa_dfa *dfa, aa_state_t start,
const char *str, struct match_workbuf *wb,
unsigned int *count)
{
u16 *def = DEFAULT_TABLE(dfa);
u32 *base = BASE_TABLE(dfa);
u16 *next = NEXT_TABLE(dfa);
u16 *check = CHECK_TABLE(dfa);
aa_state_t state = start, pos;
AA_BUG(!dfa);
AA_BUG(!str);
AA_BUG(!wb);
AA_BUG(!count);
*count = 0;
if (state == DFA_NOMATCH)
return DFA_NOMATCH;
/* current state is <state>, matching character *str */
if (dfa->tables[YYTD_ID_EC]) {
/* Equivalence class table defined */
u8 *equiv = EQUIV_TABLE(dfa);
/* default is direct to next state */
while (*str) {
unsigned int adjust;
wb->history[wb->pos] = state;
pos = base_idx(base[state]) + equiv[(u8) *str++];
if (check[pos] == state)
state = next[pos];
else
state = def[state];
if (is_loop(wb, state, &adjust)) {
state = aa_dfa_match(dfa, state, str);
*count -= adjust;
goto out;
}
inc_wb_pos(wb);
(*count)++;
}
} else {
/* default is direct to next state */
while (*str) {
unsigned int adjust;
wb->history[wb->pos] = state;
pos = base_idx(base[state]) + (u8) *str++;
if (check[pos] == state)
state = next[pos];
else
state = def[state];
if (is_loop(wb, state, &adjust)) {
state = aa_dfa_match(dfa, state, str);
*count -= adjust;
goto out;
}
inc_wb_pos(wb);
(*count)++;
}
}
out:
if (!state)
*count = 0;
return state;
}
/**
* aa_dfa_leftmatch - traverse @dfa to find state @str stops at
* @dfa: the dfa to match @str against (NOT NULL)
* @start: the state of the dfa to start matching in
* @str: the null terminated string of bytes to match against the dfa (NOT NULL)
* @count: current count of longest left.
*
* aa_dfa_match will match @str against the dfa and return the state it
* finished matching in. The final state can be used to look up the accepting
* label, or as the start state of a continuing match.
*
* Returns: final state reached after input is consumed
*/
aa_state_t aa_dfa_leftmatch(struct aa_dfa *dfa, aa_state_t start,
const char *str, unsigned int *count)
{
DEFINE_MATCH_WB(wb);
/* TODO: match for extended state dfas */
return leftmatch_fb(dfa, start, str, &wb, count);
}
| linux-master | security/apparmor/match.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* AppArmor security module
*
* This file contains AppArmor policy manipulation functions
*
* Copyright (C) 1998-2008 Novell/SUSE
* Copyright 2009-2017 Canonical Ltd.
*
* AppArmor policy namespaces, allow for different sets of policies
* to be loaded for tasks within the namespace.
*/
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/string.h>
#include "include/apparmor.h"
#include "include/cred.h"
#include "include/policy_ns.h"
#include "include/label.h"
#include "include/policy.h"
/* kernel label */
struct aa_label *kernel_t;
/* root profile namespace */
struct aa_ns *root_ns;
const char *aa_hidden_ns_name = "---";
/**
* aa_ns_visible - test if @view is visible from @curr
* @curr: namespace to treat as the parent (NOT NULL)
* @view: namespace to test if visible from @curr (NOT NULL)
* @subns: whether view of a subns is allowed
*
* Returns: true if @view is visible from @curr else false
*/
bool aa_ns_visible(struct aa_ns *curr, struct aa_ns *view, bool subns)
{
if (curr == view)
return true;
if (!subns)
return false;
for ( ; view; view = view->parent) {
if (view->parent == curr)
return true;
}
return false;
}
/**
* aa_ns_name - Find the ns name to display for @view from @curr
* @curr: current namespace (NOT NULL)
* @view: namespace attempting to view (NOT NULL)
* @subns: are subns visible
*
* Returns: name of @view visible from @curr
*/
const char *aa_ns_name(struct aa_ns *curr, struct aa_ns *view, bool subns)
{
/* if view == curr then the namespace name isn't displayed */
if (curr == view)
return "";
if (aa_ns_visible(curr, view, subns)) {
/* at this point if a ns is visible it is in a view ns
* thus the curr ns.hname is a prefix of its name.
* Only output the virtualized portion of the name
* Add + 2 to skip over // separating curr hname prefix
* from the visible tail of the views hname
*/
return view->base.hname + strlen(curr->base.hname) + 2;
}
return aa_hidden_ns_name;
}
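/*
 * Worked example (hypothetical names): if @curr has hname "root//ns1" and
 * @view has hname "root//ns1//child", the returned pointer skips
 * strlen("root//ns1") + 2 bytes and yields "child". If @view is not visible
 * from @curr the constant aa_hidden_ns_name ("---") is returned instead.
 */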
static struct aa_profile *alloc_unconfined(const char *name)
{
struct aa_profile *profile;
profile = aa_alloc_null(NULL, name, GFP_KERNEL);
if (!profile)
return NULL;
profile->label.flags |= FLAG_IX_ON_NAME_ERROR |
FLAG_IMMUTIBLE | FLAG_NS_COUNT | FLAG_UNCONFINED;
profile->mode = APPARMOR_UNCONFINED;
return profile;
}
/**
* alloc_ns - allocate, initialize and return a new namespace
* @prefix: parent namespace name (MAYBE NULL)
* @name: a preallocated name (NOT NULL)
*
* Returns: refcounted namespace or NULL on failure.
*/
static struct aa_ns *alloc_ns(const char *prefix, const char *name)
{
struct aa_ns *ns;
ns = kzalloc(sizeof(*ns), GFP_KERNEL);
AA_DEBUG("%s(%p)\n", __func__, ns);
if (!ns)
return NULL;
if (!aa_policy_init(&ns->base, prefix, name, GFP_KERNEL))
goto fail_ns;
INIT_LIST_HEAD(&ns->sub_ns);
INIT_LIST_HEAD(&ns->rawdata_list);
mutex_init(&ns->lock);
init_waitqueue_head(&ns->wait);
/* released by aa_free_ns() */
ns->unconfined = alloc_unconfined("unconfined");
if (!ns->unconfined)
goto fail_unconfined;
/* ns and ns->unconfined share ns->unconfined refcount */
ns->unconfined->ns = ns;
atomic_set(&ns->uniq_null, 0);
aa_labelset_init(&ns->labels);
return ns;
fail_unconfined:
aa_policy_destroy(&ns->base);
fail_ns:
kfree_sensitive(ns);
return NULL;
}
/**
* aa_free_ns - free a profile namespace
* @ns: the namespace to free (MAYBE NULL)
*
* Requires: All references to the namespace must have been put, if the
 * namespace was referenced by a profile confining a task.
*/
void aa_free_ns(struct aa_ns *ns)
{
if (!ns)
return;
aa_policy_destroy(&ns->base);
aa_labelset_destroy(&ns->labels);
aa_put_ns(ns->parent);
ns->unconfined->ns = NULL;
aa_free_profile(ns->unconfined);
kfree_sensitive(ns);
}
/**
* aa_findn_ns - look up a profile namespace on the namespace list
* @root: namespace to search in (NOT NULL)
* @name: name of namespace to find (NOT NULL)
* @n: length of @name
*
* Returns: a refcounted namespace on the list, or NULL if no namespace
* called @name exists.
*
* refcount released by caller
*/
struct aa_ns *aa_findn_ns(struct aa_ns *root, const char *name, size_t n)
{
struct aa_ns *ns = NULL;
rcu_read_lock();
ns = aa_get_ns(__aa_findn_ns(&root->sub_ns, name, n));
rcu_read_unlock();
return ns;
}
/**
* aa_find_ns - look up a profile namespace on the namespace list
* @root: namespace to search in (NOT NULL)
* @name: name of namespace to find (NOT NULL)
*
* Returns: a refcounted namespace on the list, or NULL if no namespace
* called @name exists.
*
* refcount released by caller
*/
struct aa_ns *aa_find_ns(struct aa_ns *root, const char *name)
{
return aa_findn_ns(root, name, strlen(name));
}
/**
* __aa_lookupn_ns - lookup the namespace matching @hname
* @view: namespace to search in (NOT NULL)
* @hname: hierarchical ns name (NOT NULL)
* @n: length of @hname
*
* Requires: rcu_read_lock be held
*
* Returns: unrefcounted ns pointer or NULL if not found
*
* Do a relative name lookup, recursing through profile tree.
*/
struct aa_ns *__aa_lookupn_ns(struct aa_ns *view, const char *hname, size_t n)
{
struct aa_ns *ns = view;
const char *split;
for (split = strnstr(hname, "//", n); split;
split = strnstr(hname, "//", n)) {
ns = __aa_findn_ns(&ns->sub_ns, hname, split - hname);
if (!ns)
return NULL;
n -= split + 2 - hname;
hname = split + 2;
}
if (n)
return __aa_findn_ns(&ns->sub_ns, hname, n);
return NULL;
}
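/*
 * Worked example (hypothetical names): looking up "foo//bar" relative to
 * @view first finds the sub-namespace "foo" of @view, then finds "bar"
 * within it; a query ending in "//" leaves n == 0 after the loop and makes
 * the function return NULL rather than the intermediate namespace.
 */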
/**
* aa_lookupn_ns - look up a policy namespace relative to @view
* @view: namespace to search in (NOT NULL)
* @name: name of namespace to find (NOT NULL)
* @n: length of @name
*
* Returns: a refcounted namespace on the list, or NULL if no namespace
* called @name exists.
*
* refcount released by caller
*/
struct aa_ns *aa_lookupn_ns(struct aa_ns *view, const char *name, size_t n)
{
struct aa_ns *ns = NULL;
rcu_read_lock();
ns = aa_get_ns(__aa_lookupn_ns(view, name, n));
rcu_read_unlock();
return ns;
}
static struct aa_ns *__aa_create_ns(struct aa_ns *parent, const char *name,
struct dentry *dir)
{
struct aa_ns *ns;
int error;
AA_BUG(!parent);
AA_BUG(!name);
AA_BUG(!mutex_is_locked(&parent->lock));
ns = alloc_ns(parent->base.hname, name);
if (!ns)
return ERR_PTR(-ENOMEM);
ns->level = parent->level + 1;
mutex_lock_nested(&ns->lock, ns->level);
error = __aafs_ns_mkdir(ns, ns_subns_dir(parent), name, dir);
if (error) {
AA_ERROR("Failed to create interface for ns %s\n",
ns->base.name);
mutex_unlock(&ns->lock);
aa_free_ns(ns);
return ERR_PTR(error);
}
ns->parent = aa_get_ns(parent);
list_add_rcu(&ns->base.list, &parent->sub_ns);
/* add list ref */
aa_get_ns(ns);
mutex_unlock(&ns->lock);
return ns;
}
/**
* __aa_find_or_create_ns - create an ns, fail if it already exists
* @parent: the parent of the namespace being created
* @name: the name of the namespace
* @dir: if not null the dir to put the ns entries in
*
 * Returns: a refcounted ns that has been added, or an ERR_PTR
*/
struct aa_ns *__aa_find_or_create_ns(struct aa_ns *parent, const char *name,
struct dentry *dir)
{
struct aa_ns *ns;
AA_BUG(!mutex_is_locked(&parent->lock));
/* try and find the specified ns */
/* released by caller */
ns = aa_get_ns(__aa_find_ns(&parent->sub_ns, name));
if (!ns)
ns = __aa_create_ns(parent, name, dir);
else
ns = ERR_PTR(-EEXIST);
/* return ref */
return ns;
}
/**
* aa_prepare_ns - find an existing or create a new namespace of @name
* @parent: ns to treat as parent
* @name: the namespace to find or add (NOT NULL)
*
* Returns: refcounted namespace or PTR_ERR if failed to create one
*/
struct aa_ns *aa_prepare_ns(struct aa_ns *parent, const char *name)
{
struct aa_ns *ns;
mutex_lock_nested(&parent->lock, parent->level);
/* try and find the specified ns and if it doesn't exist create it */
/* released by caller */
ns = aa_get_ns(__aa_find_ns(&parent->sub_ns, name));
if (!ns)
ns = __aa_create_ns(parent, name, NULL);
mutex_unlock(&parent->lock);
/* return ref */
return ns;
}
static void __ns_list_release(struct list_head *head);
/**
* destroy_ns - remove everything contained by @ns
* @ns: namespace to have it contents removed (NOT NULL)
*/
static void destroy_ns(struct aa_ns *ns)
{
if (!ns)
return;
mutex_lock_nested(&ns->lock, ns->level);
/* release all profiles in this namespace */
__aa_profile_list_release(&ns->base.profiles);
/* release all sub namespaces */
__ns_list_release(&ns->sub_ns);
if (ns->parent) {
unsigned long flags;
write_lock_irqsave(&ns->labels.lock, flags);
__aa_proxy_redirect(ns_unconfined(ns),
ns_unconfined(ns->parent));
write_unlock_irqrestore(&ns->labels.lock, flags);
}
__aafs_ns_rmdir(ns);
mutex_unlock(&ns->lock);
}
/**
* __aa_remove_ns - remove a namespace and all its children
* @ns: namespace to be removed (NOT NULL)
*
* Requires: ns->parent->lock be held and ns removed from parent.
*/
void __aa_remove_ns(struct aa_ns *ns)
{
/* remove ns from namespace list */
list_del_rcu(&ns->base.list);
destroy_ns(ns);
aa_put_ns(ns);
}
/**
* __ns_list_release - remove all profile namespaces on the list put refs
* @head: list of profile namespaces (NOT NULL)
*
* Requires: namespace lock be held
*/
static void __ns_list_release(struct list_head *head)
{
struct aa_ns *ns, *tmp;
list_for_each_entry_safe(ns, tmp, head, base.list)
__aa_remove_ns(ns);
}
/**
* aa_alloc_root_ns - allocate the root profile namespace
*
* Returns: %0 on success else error
*
*/
int __init aa_alloc_root_ns(void)
{
struct aa_profile *kernel_p;
/* released by aa_free_root_ns - used as list ref*/
root_ns = alloc_ns(NULL, "root");
if (!root_ns)
return -ENOMEM;
kernel_p = alloc_unconfined("kernel_t");
if (!kernel_p) {
destroy_ns(root_ns);
aa_free_ns(root_ns);
return -ENOMEM;
}
kernel_t = &kernel_p->label;
root_ns->unconfined->ns = aa_get_ns(root_ns);
return 0;
}
/**
* aa_free_root_ns - free the root profile namespace
*/
void __init aa_free_root_ns(void)
{
struct aa_ns *ns = root_ns;
root_ns = NULL;
aa_label_free(kernel_t);
destroy_ns(ns);
aa_put_ns(ns);
}
| linux-master | security/apparmor/policy_ns.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* AppArmor security module
*
* This file contains AppArmor /sys/kernel/security/apparmor interface functions
*
* Copyright (C) 1998-2008 Novell/SUSE
* Copyright 2009-2010 Canonical Ltd.
*/
#include <linux/ctype.h>
#include <linux/security.h>
#include <linux/vmalloc.h>
#include <linux/init.h>
#include <linux/seq_file.h>
#include <linux/uaccess.h>
#include <linux/mount.h>
#include <linux/namei.h>
#include <linux/capability.h>
#include <linux/rcupdate.h>
#include <linux/fs.h>
#include <linux/fs_context.h>
#include <linux/poll.h>
#include <linux/zstd.h>
#include <uapi/linux/major.h>
#include <uapi/linux/magic.h>
#include "include/apparmor.h"
#include "include/apparmorfs.h"
#include "include/audit.h"
#include "include/cred.h"
#include "include/crypto.h"
#include "include/ipc.h"
#include "include/label.h"
#include "include/policy.h"
#include "include/policy_ns.h"
#include "include/resource.h"
#include "include/policy_unpack.h"
#include "include/task.h"
/*
* The apparmor filesystem interface used for policy load and introspection
* The interface is split into two main components based on their function
* a securityfs component:
* used for static files that are always available, and which allows
 * userspace to specify the location of the security filesystem.
*
* fns and data are prefixed with
* aa_sfs_
*
* an apparmorfs component:
 * used for loaded policy content and introspection. It is not part of a
* regular mounted filesystem and is available only through the magic
* policy symlink in the root of the securityfs apparmor/ directory.
 * Task queries will be magically redirected to the correct portion
* of the policy tree based on their confinement.
*
* fns and data are prefixed with
* aafs_
*
* The aa_fs_ prefix is used to indicate the fn is used by both the
* securityfs and apparmorfs filesystems.
*/
/*
* support fns
*/
struct rawdata_f_data {
struct aa_loaddata *loaddata;
};
#ifdef CONFIG_SECURITY_APPARMOR_EXPORT_BINARY
#define RAWDATA_F_DATA_BUF(p) (char *)(p + 1)
static void rawdata_f_data_free(struct rawdata_f_data *private)
{
if (!private)
return;
aa_put_loaddata(private->loaddata);
kvfree(private);
}
static struct rawdata_f_data *rawdata_f_data_alloc(size_t size)
{
struct rawdata_f_data *ret;
if (size > SIZE_MAX - sizeof(*ret))
return ERR_PTR(-EINVAL);
ret = kvzalloc(sizeof(*ret) + size, GFP_KERNEL);
if (!ret)
return ERR_PTR(-ENOMEM);
return ret;
}
#endif
/**
* mangle_name - mangle a profile name to std profile layout form
* @name: profile name to mangle (NOT NULL)
* @target: buffer to store mangled name, same length as @name (MAYBE NULL)
*
* Returns: length of mangled name
*/
static int mangle_name(const char *name, char *target)
{
char *t = target;
while (*name == '/' || *name == '.')
name++;
if (target) {
for (; *name; name++) {
if (*name == '/')
*(t)++ = '.';
else if (isspace(*name))
*(t)++ = '_';
else if (isalnum(*name) || strchr("._-", *name))
*(t)++ = *name;
}
*t = 0;
} else {
int len = 0;
for (; *name; name++) {
if (isalnum(*name) || isspace(*name) ||
strchr("/._-", *name))
len++;
}
return len;
}
return t - target;
}
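/*
 * Illustrative example (hypothetical input): mangle_name("/usr/bin/foo bar")
 * skips the leading '/', maps the remaining '/' characters to '.', maps the
 * space to '_' and keeps alphanumerics and "._-", producing
 * "usr.bin.foo_bar". Any other characters are silently dropped.
 */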
/*
* aafs - core fns and data for the policy tree
*/
#define AAFS_NAME "apparmorfs"
static struct vfsmount *aafs_mnt;
static int aafs_count;
static int aafs_show_path(struct seq_file *seq, struct dentry *dentry)
{
seq_printf(seq, "%s:[%lu]", AAFS_NAME, d_inode(dentry)->i_ino);
return 0;
}
static void aafs_free_inode(struct inode *inode)
{
if (S_ISLNK(inode->i_mode))
kfree(inode->i_link);
free_inode_nonrcu(inode);
}
static const struct super_operations aafs_super_ops = {
.statfs = simple_statfs,
.free_inode = aafs_free_inode,
.show_path = aafs_show_path,
};
static int apparmorfs_fill_super(struct super_block *sb, struct fs_context *fc)
{
static struct tree_descr files[] = { {""} };
int error;
error = simple_fill_super(sb, AAFS_MAGIC, files);
if (error)
return error;
sb->s_op = &aafs_super_ops;
return 0;
}
static int apparmorfs_get_tree(struct fs_context *fc)
{
return get_tree_single(fc, apparmorfs_fill_super);
}
static const struct fs_context_operations apparmorfs_context_ops = {
.get_tree = apparmorfs_get_tree,
};
static int apparmorfs_init_fs_context(struct fs_context *fc)
{
fc->ops = &apparmorfs_context_ops;
return 0;
}
static struct file_system_type aafs_ops = {
.owner = THIS_MODULE,
.name = AAFS_NAME,
.init_fs_context = apparmorfs_init_fs_context,
.kill_sb = kill_anon_super,
};
/**
* __aafs_setup_d_inode - basic inode setup for apparmorfs
* @dir: parent directory for the dentry
 * @dentry: dentry we are setting the inode up for
* @mode: permissions the file should have
* @data: data to store on inode.i_private, available in open()
* @link: if symlink, symlink target string
* @fops: struct file_operations that should be used
* @iops: struct of inode_operations that should be used
*/
static int __aafs_setup_d_inode(struct inode *dir, struct dentry *dentry,
umode_t mode, void *data, char *link,
const struct file_operations *fops,
const struct inode_operations *iops)
{
struct inode *inode = new_inode(dir->i_sb);
AA_BUG(!dir);
AA_BUG(!dentry);
if (!inode)
return -ENOMEM;
inode->i_ino = get_next_ino();
inode->i_mode = mode;
inode->i_atime = inode->i_mtime = inode_set_ctime_current(inode);
inode->i_private = data;
if (S_ISDIR(mode)) {
inode->i_op = iops ? iops : &simple_dir_inode_operations;
inode->i_fop = &simple_dir_operations;
inc_nlink(inode);
inc_nlink(dir);
} else if (S_ISLNK(mode)) {
inode->i_op = iops ? iops : &simple_symlink_inode_operations;
inode->i_link = link;
} else {
inode->i_fop = fops;
}
d_instantiate(dentry, inode);
dget(dentry);
return 0;
}
/**
* aafs_create - create a dentry in the apparmorfs filesystem
*
* @name: name of dentry to create
* @mode: permissions the file should have
* @parent: parent directory for this dentry
* @data: data to store on inode.i_private, available in open()
* @link: if symlink, symlink target string
 * @fops: struct file_operations that should be used
* @iops: struct of inode_operations that should be used
*
* This is the basic "create a xxx" function for apparmorfs.
*
 * Returns a pointer to a dentry if it succeeds, that must be freed with
* aafs_remove(). Will return ERR_PTR on failure.
*/
static struct dentry *aafs_create(const char *name, umode_t mode,
struct dentry *parent, void *data, void *link,
const struct file_operations *fops,
const struct inode_operations *iops)
{
struct dentry *dentry;
struct inode *dir;
int error;
AA_BUG(!name);
AA_BUG(!parent);
if (!(mode & S_IFMT))
mode = (mode & S_IALLUGO) | S_IFREG;
error = simple_pin_fs(&aafs_ops, &aafs_mnt, &aafs_count);
if (error)
return ERR_PTR(error);
dir = d_inode(parent);
inode_lock(dir);
dentry = lookup_one_len(name, parent, strlen(name));
if (IS_ERR(dentry)) {
error = PTR_ERR(dentry);
goto fail_lock;
}
if (d_really_is_positive(dentry)) {
error = -EEXIST;
goto fail_dentry;
}
error = __aafs_setup_d_inode(dir, dentry, mode, data, link, fops, iops);
if (error)
goto fail_dentry;
inode_unlock(dir);
return dentry;
fail_dentry:
dput(dentry);
fail_lock:
inode_unlock(dir);
simple_release_fs(&aafs_mnt, &aafs_count);
return ERR_PTR(error);
}
/**
* aafs_create_file - create a file in the apparmorfs filesystem
*
* @name: name of dentry to create
* @mode: permissions the file should have
* @parent: parent directory for this dentry
* @data: data to store on inode.i_private, available in open()
 * @fops: struct file_operations that should be used
*
* see aafs_create
*/
static struct dentry *aafs_create_file(const char *name, umode_t mode,
struct dentry *parent, void *data,
const struct file_operations *fops)
{
return aafs_create(name, mode, parent, data, NULL, fops, NULL);
}
/**
* aafs_create_dir - create a directory in the apparmorfs filesystem
*
* @name: name of dentry to create
* @parent: parent directory for this dentry
*
* see aafs_create
*/
static struct dentry *aafs_create_dir(const char *name, struct dentry *parent)
{
return aafs_create(name, S_IFDIR | 0755, parent, NULL, NULL, NULL,
NULL);
}
/**
* aafs_remove - removes a file or directory from the apparmorfs filesystem
*
 * @dentry: dentry of the file/directory/symlink to be removed.
*/
static void aafs_remove(struct dentry *dentry)
{
struct inode *dir;
if (!dentry || IS_ERR(dentry))
return;
dir = d_inode(dentry->d_parent);
inode_lock(dir);
if (simple_positive(dentry)) {
if (d_is_dir(dentry))
simple_rmdir(dir, dentry);
else
simple_unlink(dir, dentry);
d_delete(dentry);
dput(dentry);
}
inode_unlock(dir);
simple_release_fs(&aafs_mnt, &aafs_count);
}
/*
* aa_fs - policy load/replace/remove
*/
/**
* aa_simple_write_to_buffer - common routine for getting policy from user
* @userbuf: user buffer to copy data from (NOT NULL)
* @alloc_size: size of user buffer (REQUIRES: @alloc_size >= @copy_size)
* @copy_size: size of data to copy from user buffer
* @pos: position write is at in the file (NOT NULL)
*
* Returns: kernel buffer containing copy of user buffer data or an
* ERR_PTR on failure.
*/
static struct aa_loaddata *aa_simple_write_to_buffer(const char __user *userbuf,
size_t alloc_size,
size_t copy_size,
loff_t *pos)
{
struct aa_loaddata *data;
AA_BUG(copy_size > alloc_size);
if (*pos != 0)
/* only writes from pos 0, that is complete writes */
return ERR_PTR(-ESPIPE);
/* freed by caller to simple_write_to_buffer */
data = aa_loaddata_alloc(alloc_size);
if (IS_ERR(data))
return data;
data->size = copy_size;
if (copy_from_user(data->data, userbuf, copy_size)) {
aa_put_loaddata(data);
return ERR_PTR(-EFAULT);
}
return data;
}
static ssize_t policy_update(u32 mask, const char __user *buf, size_t size,
loff_t *pos, struct aa_ns *ns)
{
struct aa_loaddata *data;
struct aa_label *label;
ssize_t error;
label = begin_current_label_crit_section();
/* high level check about policy management - fine grained in
* below after unpack
*/
error = aa_may_manage_policy(label, ns, mask);
if (error)
goto end_section;
data = aa_simple_write_to_buffer(buf, size, size, pos);
error = PTR_ERR(data);
if (!IS_ERR(data)) {
error = aa_replace_profiles(ns, label, mask, data);
aa_put_loaddata(data);
}
end_section:
end_current_label_crit_section(label);
return error;
}
/* .load file hook fn to load policy */
static ssize_t profile_load(struct file *f, const char __user *buf, size_t size,
loff_t *pos)
{
struct aa_ns *ns = aa_get_ns(f->f_inode->i_private);
int error = policy_update(AA_MAY_LOAD_POLICY, buf, size, pos, ns);
aa_put_ns(ns);
return error;
}
static const struct file_operations aa_fs_profile_load = {
.write = profile_load,
.llseek = default_llseek,
};
/* .replace file hook fn to load and/or replace policy */
static ssize_t profile_replace(struct file *f, const char __user *buf,
size_t size, loff_t *pos)
{
struct aa_ns *ns = aa_get_ns(f->f_inode->i_private);
int error = policy_update(AA_MAY_LOAD_POLICY | AA_MAY_REPLACE_POLICY,
buf, size, pos, ns);
aa_put_ns(ns);
return error;
}
static const struct file_operations aa_fs_profile_replace = {
.write = profile_replace,
.llseek = default_llseek,
};
/* .remove file hook fn to remove loaded policy */
static ssize_t profile_remove(struct file *f, const char __user *buf,
size_t size, loff_t *pos)
{
struct aa_loaddata *data;
struct aa_label *label;
ssize_t error;
struct aa_ns *ns = aa_get_ns(f->f_inode->i_private);
label = begin_current_label_crit_section();
/* high level check about policy management - fine grained in
* below after unpack
*/
error = aa_may_manage_policy(label, ns, AA_MAY_REMOVE_POLICY);
if (error)
goto out;
/*
* aa_remove_profile needs a null terminated string so 1 extra
* byte is allocated and the copied data is null terminated.
*/
data = aa_simple_write_to_buffer(buf, size + 1, size, pos);
error = PTR_ERR(data);
if (!IS_ERR(data)) {
data->data[size] = 0;
error = aa_remove_profiles(ns, label, data->data, size);
aa_put_loaddata(data);
}
out:
end_current_label_crit_section(label);
aa_put_ns(ns);
return error;
}
static const struct file_operations aa_fs_profile_remove = {
.write = profile_remove,
.llseek = default_llseek,
};
struct aa_revision {
struct aa_ns *ns;
long last_read;
};
/* revision file hook fn for policy loads */
static int ns_revision_release(struct inode *inode, struct file *file)
{
struct aa_revision *rev = file->private_data;
if (rev) {
aa_put_ns(rev->ns);
kfree(rev);
}
return 0;
}
static ssize_t ns_revision_read(struct file *file, char __user *buf,
size_t size, loff_t *ppos)
{
struct aa_revision *rev = file->private_data;
char buffer[32];
long last_read;
int avail;
mutex_lock_nested(&rev->ns->lock, rev->ns->level);
last_read = rev->last_read;
if (last_read == rev->ns->revision) {
mutex_unlock(&rev->ns->lock);
if (file->f_flags & O_NONBLOCK)
return -EAGAIN;
if (wait_event_interruptible(rev->ns->wait,
last_read !=
READ_ONCE(rev->ns->revision)))
return -ERESTARTSYS;
mutex_lock_nested(&rev->ns->lock, rev->ns->level);
}
avail = sprintf(buffer, "%ld\n", rev->ns->revision);
if (*ppos + size > avail) {
rev->last_read = rev->ns->revision;
*ppos = 0;
}
mutex_unlock(&rev->ns->lock);
return simple_read_from_buffer(buf, size, ppos, buffer, avail);
}
static int ns_revision_open(struct inode *inode, struct file *file)
{
struct aa_revision *rev = kzalloc(sizeof(*rev), GFP_KERNEL);
if (!rev)
return -ENOMEM;
rev->ns = aa_get_ns(inode->i_private);
if (!rev->ns)
rev->ns = aa_get_current_ns();
file->private_data = rev;
return 0;
}
static __poll_t ns_revision_poll(struct file *file, poll_table *pt)
{
struct aa_revision *rev = file->private_data;
__poll_t mask = 0;
if (rev) {
mutex_lock_nested(&rev->ns->lock, rev->ns->level);
poll_wait(file, &rev->ns->wait, pt);
if (rev->last_read < rev->ns->revision)
mask |= EPOLLIN | EPOLLRDNORM;
mutex_unlock(&rev->ns->lock);
}
return mask;
}
void __aa_bump_ns_revision(struct aa_ns *ns)
{
WRITE_ONCE(ns->revision, READ_ONCE(ns->revision) + 1);
wake_up_interruptible(&ns->wait);
}
static const struct file_operations aa_fs_ns_revision_fops = {
.owner = THIS_MODULE,
.open = ns_revision_open,
.poll = ns_revision_poll,
.read = ns_revision_read,
.llseek = generic_file_llseek,
.release = ns_revision_release,
};
static void profile_query_cb(struct aa_profile *profile, struct aa_perms *perms,
const char *match_str, size_t match_len)
{
struct aa_ruleset *rules = list_first_entry(&profile->rules,
typeof(*rules), list);
struct aa_perms tmp = { };
aa_state_t state = DFA_NOMATCH;
if (profile_unconfined(profile))
return;
if (rules->file.dfa && *match_str == AA_CLASS_FILE) {
state = aa_dfa_match_len(rules->file.dfa,
rules->file.start[AA_CLASS_FILE],
match_str + 1, match_len - 1);
if (state) {
struct path_cond cond = { };
tmp = *(aa_lookup_fperms(&(rules->file), state, &cond));
}
} else if (rules->policy.dfa) {
if (!RULE_MEDIATES(rules, *match_str))
return; /* no change to current perms */
state = aa_dfa_match_len(rules->policy.dfa,
rules->policy.start[0],
match_str, match_len);
if (state)
tmp = *aa_lookup_perms(&rules->policy, state);
}
aa_apply_modes_to_perms(profile, &tmp);
aa_perms_accum_raw(perms, &tmp);
}
/**
* query_data - queries a policy and writes its data to buf
* @buf: the resulting data is stored here (NOT NULL)
* @buf_len: size of buf
* @query: query string used to retrieve data
* @query_len: size of query including second NUL byte
*
* The buffers pointed to by buf and query may overlap. The query buffer is
* parsed before buf is written to.
*
* The query should look like "<LABEL>\0<KEY>\0", where <LABEL> is the name of
* the security confinement context and <KEY> is the name of the data to
* retrieve. <LABEL> and <KEY> must not be NUL-terminated.
*
* Don't expect the contents of buf to be preserved on failure.
*
* Returns: number of characters written to buf or -errno on failure
*/
static ssize_t query_data(char *buf, size_t buf_len,
char *query, size_t query_len)
{
char *out;
const char *key;
struct label_it i;
struct aa_label *label, *curr;
struct aa_profile *profile;
struct aa_data *data;
u32 bytes, blocks;
__le32 outle32;
if (!query_len)
return -EINVAL; /* need a query */
key = query + strnlen(query, query_len) + 1;
if (key + 1 >= query + query_len)
return -EINVAL; /* not enough space for a non-empty key */
if (key + strnlen(key, query + query_len - key) >= query + query_len)
return -EINVAL; /* must end with NUL */
if (buf_len < sizeof(bytes) + sizeof(blocks))
return -EINVAL; /* not enough space */
curr = begin_current_label_crit_section();
label = aa_label_parse(curr, query, GFP_KERNEL, false, false);
end_current_label_crit_section(curr);
if (IS_ERR(label))
return PTR_ERR(label);
/* We are going to leave space for two numbers. The first is the total
* number of bytes we are writing after the first number. This is so
* users can read the full output without reallocation.
*
* The second number is the number of data blocks we're writing. An
* application might be confined by multiple policies having data in
* the same key.
*/
memset(buf, 0, sizeof(bytes) + sizeof(blocks));
out = buf + sizeof(bytes) + sizeof(blocks);
blocks = 0;
label_for_each_confined(i, label, profile) {
if (!profile->data)
continue;
data = rhashtable_lookup_fast(profile->data, &key,
profile->data->p);
if (data) {
if (out + sizeof(outle32) + data->size > buf +
buf_len) {
aa_put_label(label);
return -EINVAL; /* not enough space */
}
outle32 = __cpu_to_le32(data->size);
memcpy(out, &outle32, sizeof(outle32));
out += sizeof(outle32);
memcpy(out, data->data, data->size);
out += data->size;
blocks++;
}
}
aa_put_label(label);
outle32 = __cpu_to_le32(out - buf - sizeof(bytes));
memcpy(buf, &outle32, sizeof(outle32));
outle32 = __cpu_to_le32(blocks);
memcpy(buf + sizeof(bytes), &outle32, sizeof(outle32));
return out - buf;
}
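/*
 * Illustrative layout of @buf on success (sizes are little-endian u32):
 *
 *	[ total bytes after this field ][ block count ]
 *	[ size of block 1 ][ block 1 data ]
 *	[ size of block 2 ][ block 2 data ] ...
 *
 * so a query for a hypothetical key "mykey" against a label confined by two
 * profiles carrying that key yields a block count of 2 followed by the two
 * data blobs, each prefixed by its own length.
 */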
/**
* query_label - queries a label and writes permissions to buf
* @buf: the resulting permissions string is stored here (NOT NULL)
* @buf_len: size of buf
* @query: binary query string to match against the dfa
* @query_len: size of query
* @view_only: only compute for querier's view
*
* The buffers pointed to by buf and query may overlap. The query buffer is
* parsed before buf is written to.
*
* The query should look like "LABEL_NAME\0DFA_STRING" where LABEL_NAME is
* the name of the label, in the current namespace, that is to be queried and
* DFA_STRING is a binary string to match against the label(s)'s DFA.
*
* LABEL_NAME must be NUL terminated. DFA_STRING may contain NUL characters
* but must *not* be NUL terminated.
*
* Returns: number of characters written to buf or -errno on failure
*/
static ssize_t query_label(char *buf, size_t buf_len,
char *query, size_t query_len, bool view_only)
{
struct aa_profile *profile;
struct aa_label *label, *curr;
char *label_name, *match_str;
size_t label_name_len, match_len;
struct aa_perms perms;
struct label_it i;
if (!query_len)
return -EINVAL;
label_name = query;
label_name_len = strnlen(query, query_len);
if (!label_name_len || label_name_len == query_len)
return -EINVAL;
/**
* The extra byte is to account for the null byte between the
 * label name and dfa string. label_name_len is greater
* than zero and less than query_len, so a byte can be safely
* added or subtracted.
*/
match_str = label_name + label_name_len + 1;
match_len = query_len - label_name_len - 1;
curr = begin_current_label_crit_section();
label = aa_label_parse(curr, label_name, GFP_KERNEL, false, false);
end_current_label_crit_section(curr);
if (IS_ERR(label))
return PTR_ERR(label);
perms = allperms;
if (view_only) {
label_for_each_in_ns(i, labels_ns(label), label, profile) {
profile_query_cb(profile, &perms, match_str, match_len);
}
} else {
label_for_each(i, label, profile) {
profile_query_cb(profile, &perms, match_str, match_len);
}
}
aa_put_label(label);
return scnprintf(buf, buf_len,
"allow 0x%08x\ndeny 0x%08x\naudit 0x%08x\nquiet 0x%08x\n",
perms.allow, perms.deny, perms.audit, perms.quiet);
}
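/*
 * Illustrative query (hypothetical label and path): the binary query
 * "firefox\0" followed by a byte of AA_CLASS_FILE and then "/etc/hosts"
 * asks what file permissions the label "firefox" has on /etc/hosts; the
 * reply written to @buf is the four "allow/deny/audit/quiet" hex lines
 * produced by the scnprintf() above.
 */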
/*
* Transaction based IO.
* The file expects a write which triggers the transaction, and then
* possibly a read(s) which collects the result - which is stored in a
* file-local buffer. Once a new write is performed, a new set of results
* are stored in the file-local buffer.
*/
struct multi_transaction {
struct kref count;
ssize_t size;
char data[];
};
#define MULTI_TRANSACTION_LIMIT (PAGE_SIZE - sizeof(struct multi_transaction))
static void multi_transaction_kref(struct kref *kref)
{
struct multi_transaction *t;
t = container_of(kref, struct multi_transaction, count);
free_page((unsigned long) t);
}
static struct multi_transaction *
get_multi_transaction(struct multi_transaction *t)
{
if (t)
kref_get(&(t->count));
return t;
}
static void put_multi_transaction(struct multi_transaction *t)
{
if (t)
kref_put(&(t->count), multi_transaction_kref);
}
/* does not increment @new's count */
static void multi_transaction_set(struct file *file,
struct multi_transaction *new, size_t n)
{
struct multi_transaction *old;
AA_BUG(n > MULTI_TRANSACTION_LIMIT);
new->size = n;
spin_lock(&file->f_lock);
old = (struct multi_transaction *) file->private_data;
file->private_data = new;
spin_unlock(&file->f_lock);
put_multi_transaction(old);
}
static struct multi_transaction *multi_transaction_new(struct file *file,
const char __user *buf,
size_t size)
{
struct multi_transaction *t;
if (size > MULTI_TRANSACTION_LIMIT - 1)
return ERR_PTR(-EFBIG);
t = (struct multi_transaction *)get_zeroed_page(GFP_KERNEL);
if (!t)
return ERR_PTR(-ENOMEM);
kref_init(&t->count);
if (copy_from_user(t->data, buf, size)) {
put_multi_transaction(t);
return ERR_PTR(-EFAULT);
}
return t;
}
static ssize_t multi_transaction_read(struct file *file, char __user *buf,
size_t size, loff_t *pos)
{
struct multi_transaction *t;
ssize_t ret;
spin_lock(&file->f_lock);
t = get_multi_transaction(file->private_data);
spin_unlock(&file->f_lock);
if (!t)
return 0;
ret = simple_read_from_buffer(buf, size, pos, t->data, t->size);
put_multi_transaction(t);
return ret;
}
static int multi_transaction_release(struct inode *inode, struct file *file)
{
put_multi_transaction(file->private_data);
return 0;
}
#define QUERY_CMD_LABEL "label\0"
#define QUERY_CMD_LABEL_LEN 6
#define QUERY_CMD_PROFILE "profile\0"
#define QUERY_CMD_PROFILE_LEN 8
#define QUERY_CMD_LABELALL "labelall\0"
#define QUERY_CMD_LABELALL_LEN 9
#define QUERY_CMD_DATA "data\0"
#define QUERY_CMD_DATA_LEN 5
/**
* aa_write_access - generic permissions and data query
* @file: pointer to open apparmorfs/access file
* @ubuf: user buffer containing the complete query string (NOT NULL)
* @count: size of ubuf
* @ppos: position in the file (MUST BE ZERO)
*
* Allows for one permissions or data query per open(), write(), and read()
* sequence. The only queries currently supported are label-based queries for
* permissions or data.
*
* For permissions queries, ubuf must begin with "label\0", followed by the
* profile query specific format described in the query_label() function
* documentation.
*
* For data queries, ubuf must have the form "data\0<LABEL>\0<KEY>\0", where
* <LABEL> is the name of the security confinement context and <KEY> is the
* name of the data to retrieve.
*
* Returns: number of bytes written or -errno on failure
*/
static ssize_t aa_write_access(struct file *file, const char __user *ubuf,
size_t count, loff_t *ppos)
{
struct multi_transaction *t;
ssize_t len;
if (*ppos)
return -ESPIPE;
t = multi_transaction_new(file, ubuf, count);
if (IS_ERR(t))
return PTR_ERR(t);
if (count > QUERY_CMD_PROFILE_LEN &&
!memcmp(t->data, QUERY_CMD_PROFILE, QUERY_CMD_PROFILE_LEN)) {
len = query_label(t->data, MULTI_TRANSACTION_LIMIT,
t->data + QUERY_CMD_PROFILE_LEN,
count - QUERY_CMD_PROFILE_LEN, true);
} else if (count > QUERY_CMD_LABEL_LEN &&
!memcmp(t->data, QUERY_CMD_LABEL, QUERY_CMD_LABEL_LEN)) {
len = query_label(t->data, MULTI_TRANSACTION_LIMIT,
t->data + QUERY_CMD_LABEL_LEN,
count - QUERY_CMD_LABEL_LEN, true);
} else if (count > QUERY_CMD_LABELALL_LEN &&
!memcmp(t->data, QUERY_CMD_LABELALL,
QUERY_CMD_LABELALL_LEN)) {
len = query_label(t->data, MULTI_TRANSACTION_LIMIT,
t->data + QUERY_CMD_LABELALL_LEN,
count - QUERY_CMD_LABELALL_LEN, false);
} else if (count > QUERY_CMD_DATA_LEN &&
!memcmp(t->data, QUERY_CMD_DATA, QUERY_CMD_DATA_LEN)) {
len = query_data(t->data, MULTI_TRANSACTION_LIMIT,
t->data + QUERY_CMD_DATA_LEN,
count - QUERY_CMD_DATA_LEN);
} else
len = -EINVAL;
if (len < 0) {
put_multi_transaction(t);
return len;
}
multi_transaction_set(file, t, len);
return count;
}
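/*
 * Illustrative userspace sketch (not from the kernel source): one query per
 * open()/write()/read() cycle against the access query file (".access" under
 * the apparmor securityfs directory in current kernels -- treat the exact
 * path as an assumption).
 *
 *	int fd = open("/sys/kernel/security/apparmor/.access", O_RDWR);
 *	// query: "label\0" + label name + '\0' + AA_CLASS_FILE + path
 *	write(fd, query, query_len);
 *	read(fd, reply, sizeof(reply));	// "allow 0x...\ndeny 0x...\n..."
 *	close(fd);
 */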
static const struct file_operations aa_sfs_access = {
.write = aa_write_access,
.read = multi_transaction_read,
.release = multi_transaction_release,
.llseek = generic_file_llseek,
};
static int aa_sfs_seq_show(struct seq_file *seq, void *v)
{
struct aa_sfs_entry *fs_file = seq->private;
if (!fs_file)
return 0;
switch (fs_file->v_type) {
case AA_SFS_TYPE_BOOLEAN:
seq_printf(seq, "%s\n", fs_file->v.boolean ? "yes" : "no");
break;
case AA_SFS_TYPE_STRING:
seq_printf(seq, "%s\n", fs_file->v.string);
break;
case AA_SFS_TYPE_U64:
seq_printf(seq, "%#08lx\n", fs_file->v.u64);
break;
default:
/* Ignore unprintable entry types. */
break;
}
return 0;
}
static int aa_sfs_seq_open(struct inode *inode, struct file *file)
{
return single_open(file, aa_sfs_seq_show, inode->i_private);
}
const struct file_operations aa_sfs_seq_file_ops = {
.owner = THIS_MODULE,
.open = aa_sfs_seq_open,
.read = seq_read,
.llseek = seq_lseek,
.release = single_release,
};
/*
* profile based file operations
* policy/profiles/XXXX/profiles/ *
*/
#define SEQ_PROFILE_FOPS(NAME) \
static int seq_profile_ ##NAME ##_open(struct inode *inode, struct file *file)\
{ \
return seq_profile_open(inode, file, seq_profile_ ##NAME ##_show); \
} \
\
static const struct file_operations seq_profile_ ##NAME ##_fops = { \
.owner = THIS_MODULE, \
.open = seq_profile_ ##NAME ##_open, \
.read = seq_read, \
.llseek = seq_lseek, \
.release = seq_profile_release, \
}
static int seq_profile_open(struct inode *inode, struct file *file,
int (*show)(struct seq_file *, void *))
{
struct aa_proxy *proxy = aa_get_proxy(inode->i_private);
int error = single_open(file, show, proxy);
if (error) {
file->private_data = NULL;
aa_put_proxy(proxy);
}
return error;
}
static int seq_profile_release(struct inode *inode, struct file *file)
{
struct seq_file *seq = (struct seq_file *) file->private_data;
if (seq)
aa_put_proxy(seq->private);
return single_release(inode, file);
}
static int seq_profile_name_show(struct seq_file *seq, void *v)
{
struct aa_proxy *proxy = seq->private;
struct aa_label *label = aa_get_label_rcu(&proxy->label);
struct aa_profile *profile = labels_profile(label);
seq_printf(seq, "%s\n", profile->base.name);
aa_put_label(label);
return 0;
}
static int seq_profile_mode_show(struct seq_file *seq, void *v)
{
struct aa_proxy *proxy = seq->private;
struct aa_label *label = aa_get_label_rcu(&proxy->label);
struct aa_profile *profile = labels_profile(label);
seq_printf(seq, "%s\n", aa_profile_mode_names[profile->mode]);
aa_put_label(label);
return 0;
}
static int seq_profile_attach_show(struct seq_file *seq, void *v)
{
struct aa_proxy *proxy = seq->private;
struct aa_label *label = aa_get_label_rcu(&proxy->label);
struct aa_profile *profile = labels_profile(label);
if (profile->attach.xmatch_str)
seq_printf(seq, "%s\n", profile->attach.xmatch_str);
else if (profile->attach.xmatch.dfa)
seq_puts(seq, "<unknown>\n");
else
seq_printf(seq, "%s\n", profile->base.name);
aa_put_label(label);
return 0;
}
static int seq_profile_hash_show(struct seq_file *seq, void *v)
{
struct aa_proxy *proxy = seq->private;
struct aa_label *label = aa_get_label_rcu(&proxy->label);
struct aa_profile *profile = labels_profile(label);
unsigned int i, size = aa_hash_size();
if (profile->hash) {
for (i = 0; i < size; i++)
seq_printf(seq, "%.2x", profile->hash[i]);
seq_putc(seq, '\n');
}
aa_put_label(label);
return 0;
}
SEQ_PROFILE_FOPS(name);
SEQ_PROFILE_FOPS(mode);
SEQ_PROFILE_FOPS(attach);
SEQ_PROFILE_FOPS(hash);
/*
* namespace based files
* several root files and
* policy/ *
*/
#define SEQ_NS_FOPS(NAME) \
static int seq_ns_ ##NAME ##_open(struct inode *inode, struct file *file) \
{ \
return single_open(file, seq_ns_ ##NAME ##_show, inode->i_private); \
} \
\
static const struct file_operations seq_ns_ ##NAME ##_fops = { \
.owner = THIS_MODULE, \
.open = seq_ns_ ##NAME ##_open, \
.read = seq_read, \
.llseek = seq_lseek, \
.release = single_release, \
}
static int seq_ns_stacked_show(struct seq_file *seq, void *v)
{
struct aa_label *label;
label = begin_current_label_crit_section();
seq_printf(seq, "%s\n", label->size > 1 ? "yes" : "no");
end_current_label_crit_section(label);
return 0;
}
static int seq_ns_nsstacked_show(struct seq_file *seq, void *v)
{
struct aa_label *label;
struct aa_profile *profile;
struct label_it it;
int count = 1;
label = begin_current_label_crit_section();
if (label->size > 1) {
label_for_each(it, label, profile)
if (profile->ns != labels_ns(label)) {
count++;
break;
}
}
seq_printf(seq, "%s\n", count > 1 ? "yes" : "no");
end_current_label_crit_section(label);
return 0;
}
static int seq_ns_level_show(struct seq_file *seq, void *v)
{
struct aa_label *label;
label = begin_current_label_crit_section();
seq_printf(seq, "%d\n", labels_ns(label)->level);
end_current_label_crit_section(label);
return 0;
}
static int seq_ns_name_show(struct seq_file *seq, void *v)
{
struct aa_label *label = begin_current_label_crit_section();
seq_printf(seq, "%s\n", labels_ns(label)->base.name);
end_current_label_crit_section(label);
return 0;
}
static int seq_ns_compress_min_show(struct seq_file *seq, void *v)
{
seq_printf(seq, "%d\n", AA_MIN_CLEVEL);
return 0;
}
static int seq_ns_compress_max_show(struct seq_file *seq, void *v)
{
seq_printf(seq, "%d\n", AA_MAX_CLEVEL);
return 0;
}
SEQ_NS_FOPS(stacked);
SEQ_NS_FOPS(nsstacked);
SEQ_NS_FOPS(level);
SEQ_NS_FOPS(name);
SEQ_NS_FOPS(compress_min);
SEQ_NS_FOPS(compress_max);
/* policy/raw_data/ * file ops */
#ifdef CONFIG_SECURITY_APPARMOR_EXPORT_BINARY
#define SEQ_RAWDATA_FOPS(NAME) \
static int seq_rawdata_ ##NAME ##_open(struct inode *inode, struct file *file)\
{ \
return seq_rawdata_open(inode, file, seq_rawdata_ ##NAME ##_show); \
} \
\
static const struct file_operations seq_rawdata_ ##NAME ##_fops = { \
.owner = THIS_MODULE, \
.open = seq_rawdata_ ##NAME ##_open, \
.read = seq_read, \
.llseek = seq_lseek, \
.release = seq_rawdata_release, \
}
static int seq_rawdata_open(struct inode *inode, struct file *file,
int (*show)(struct seq_file *, void *))
{
struct aa_loaddata *data = __aa_get_loaddata(inode->i_private);
int error;
if (!data)
/* lost race: this entry is being reaped */
return -ENOENT;
error = single_open(file, show, data);
if (error) {
AA_BUG(file->private_data &&
((struct seq_file *)file->private_data)->private);
aa_put_loaddata(data);
}
return error;
}
static int seq_rawdata_release(struct inode *inode, struct file *file)
{
struct seq_file *seq = (struct seq_file *) file->private_data;
if (seq)
aa_put_loaddata(seq->private);
return single_release(inode, file);
}
static int seq_rawdata_abi_show(struct seq_file *seq, void *v)
{
struct aa_loaddata *data = seq->private;
seq_printf(seq, "v%d\n", data->abi);
return 0;
}
static int seq_rawdata_revision_show(struct seq_file *seq, void *v)
{
struct aa_loaddata *data = seq->private;
seq_printf(seq, "%ld\n", data->revision);
return 0;
}
static int seq_rawdata_hash_show(struct seq_file *seq, void *v)
{
struct aa_loaddata *data = seq->private;
unsigned int i, size = aa_hash_size();
if (data->hash) {
for (i = 0; i < size; i++)
seq_printf(seq, "%.2x", data->hash[i]);
seq_putc(seq, '\n');
}
return 0;
}
static int seq_rawdata_compressed_size_show(struct seq_file *seq, void *v)
{
struct aa_loaddata *data = seq->private;
seq_printf(seq, "%zu\n", data->compressed_size);
return 0;
}
SEQ_RAWDATA_FOPS(abi);
SEQ_RAWDATA_FOPS(revision);
SEQ_RAWDATA_FOPS(hash);
SEQ_RAWDATA_FOPS(compressed_size);
static int decompress_zstd(char *src, size_t slen, char *dst, size_t dlen)
{
#ifdef CONFIG_SECURITY_APPARMOR_EXPORT_BINARY
if (slen < dlen) {
const size_t wksp_len = zstd_dctx_workspace_bound();
zstd_dctx *ctx;
void *wksp;
size_t out_len;
int ret = 0;
wksp = kvzalloc(wksp_len, GFP_KERNEL);
if (!wksp) {
ret = -ENOMEM;
goto cleanup;
}
ctx = zstd_init_dctx(wksp, wksp_len);
if (ctx == NULL) {
ret = -ENOMEM;
goto cleanup;
}
out_len = zstd_decompress_dctx(ctx, dst, dlen, src, slen);
if (zstd_is_error(out_len)) {
ret = -EINVAL;
goto cleanup;
}
cleanup:
kvfree(wksp);
return ret;
}
#endif
if (dlen < slen)
return -EINVAL;
memcpy(dst, src, slen);
return 0;
}
static ssize_t rawdata_read(struct file *file, char __user *buf, size_t size,
loff_t *ppos)
{
struct rawdata_f_data *private = file->private_data;
return simple_read_from_buffer(buf, size, ppos,
RAWDATA_F_DATA_BUF(private),
private->loaddata->size);
}
static int rawdata_release(struct inode *inode, struct file *file)
{
rawdata_f_data_free(file->private_data);
return 0;
}
static int rawdata_open(struct inode *inode, struct file *file)
{
int error;
struct aa_loaddata *loaddata;
struct rawdata_f_data *private;
if (!aa_current_policy_view_capable(NULL))
return -EACCES;
loaddata = __aa_get_loaddata(inode->i_private);
if (!loaddata)
/* lost race: this entry is being reaped */
return -ENOENT;
private = rawdata_f_data_alloc(loaddata->size);
if (IS_ERR(private)) {
error = PTR_ERR(private);
goto fail_private_alloc;
}
private->loaddata = loaddata;
error = decompress_zstd(loaddata->data, loaddata->compressed_size,
RAWDATA_F_DATA_BUF(private),
loaddata->size);
if (error)
goto fail_decompress;
file->private_data = private;
return 0;
fail_decompress:
rawdata_f_data_free(private);
return error;
fail_private_alloc:
aa_put_loaddata(loaddata);
return error;
}
static const struct file_operations rawdata_fops = {
.open = rawdata_open,
.read = rawdata_read,
.llseek = generic_file_llseek,
.release = rawdata_release,
};
static void remove_rawdata_dents(struct aa_loaddata *rawdata)
{
int i;
for (i = 0; i < AAFS_LOADDATA_NDENTS; i++) {
if (!IS_ERR_OR_NULL(rawdata->dents[i])) {
/* no refcounts on i_private */
aafs_remove(rawdata->dents[i]);
rawdata->dents[i] = NULL;
}
}
}
void __aa_fs_remove_rawdata(struct aa_loaddata *rawdata)
{
AA_BUG(rawdata->ns && !mutex_is_locked(&rawdata->ns->lock));
if (rawdata->ns) {
remove_rawdata_dents(rawdata);
list_del_init(&rawdata->list);
aa_put_ns(rawdata->ns);
rawdata->ns = NULL;
}
}
int __aa_fs_create_rawdata(struct aa_ns *ns, struct aa_loaddata *rawdata)
{
struct dentry *dent, *dir;
AA_BUG(!ns);
AA_BUG(!rawdata);
AA_BUG(!mutex_is_locked(&ns->lock));
AA_BUG(!ns_subdata_dir(ns));
/*
 * just use the ns revision the dir was originally created at. This is
* under ns->lock and if load is successful revision will be
* bumped and is guaranteed to be unique
*/
rawdata->name = kasprintf(GFP_KERNEL, "%ld", ns->revision);
if (!rawdata->name)
return -ENOMEM;
dir = aafs_create_dir(rawdata->name, ns_subdata_dir(ns));
if (IS_ERR(dir))
/* ->name freed when rawdata freed */
return PTR_ERR(dir);
rawdata->dents[AAFS_LOADDATA_DIR] = dir;
dent = aafs_create_file("abi", S_IFREG | 0444, dir, rawdata,
&seq_rawdata_abi_fops);
if (IS_ERR(dent))
goto fail;
rawdata->dents[AAFS_LOADDATA_ABI] = dent;
dent = aafs_create_file("revision", S_IFREG | 0444, dir, rawdata,
&seq_rawdata_revision_fops);
if (IS_ERR(dent))
goto fail;
rawdata->dents[AAFS_LOADDATA_REVISION] = dent;
if (aa_g_hash_policy) {
dent = aafs_create_file("sha1", S_IFREG | 0444, dir,
rawdata, &seq_rawdata_hash_fops);
if (IS_ERR(dent))
goto fail;
rawdata->dents[AAFS_LOADDATA_HASH] = dent;
}
dent = aafs_create_file("compressed_size", S_IFREG | 0444, dir,
rawdata,
&seq_rawdata_compressed_size_fops);
if (IS_ERR(dent))
goto fail;
rawdata->dents[AAFS_LOADDATA_COMPRESSED_SIZE] = dent;
dent = aafs_create_file("raw_data", S_IFREG | 0444,
dir, rawdata, &rawdata_fops);
if (IS_ERR(dent))
goto fail;
rawdata->dents[AAFS_LOADDATA_DATA] = dent;
d_inode(dent)->i_size = rawdata->size;
rawdata->ns = aa_get_ns(ns);
list_add(&rawdata->list, &ns->rawdata_list);
/* no refcount on inode rawdata */
return 0;
fail:
remove_rawdata_dents(rawdata);
return PTR_ERR(dent);
}
#endif /* CONFIG_SECURITY_APPARMOR_EXPORT_BINARY */
/** fns to setup dynamic per profile/namespace files **/
/*
*
* Requires: @profile->ns->lock held
*/
void __aafs_profile_rmdir(struct aa_profile *profile)
{
struct aa_profile *child;
int i;
if (!profile)
return;
list_for_each_entry(child, &profile->base.profiles, base.list)
__aafs_profile_rmdir(child);
for (i = AAFS_PROF_SIZEOF - 1; i >= 0; --i) {
struct aa_proxy *proxy;
if (!profile->dents[i])
continue;
proxy = d_inode(profile->dents[i])->i_private;
aafs_remove(profile->dents[i]);
aa_put_proxy(proxy);
profile->dents[i] = NULL;
}
}
/*
*
* Requires: @old->ns->lock held
*/
void __aafs_profile_migrate_dents(struct aa_profile *old,
struct aa_profile *new)
{
int i;
AA_BUG(!old);
AA_BUG(!new);
AA_BUG(!mutex_is_locked(&profiles_ns(old)->lock));
for (i = 0; i < AAFS_PROF_SIZEOF; i++) {
new->dents[i] = old->dents[i];
if (new->dents[i]) {
struct inode *inode = d_inode(new->dents[i]);
inode->i_mtime = inode_set_ctime_current(inode);
}
old->dents[i] = NULL;
}
}
static struct dentry *create_profile_file(struct dentry *dir, const char *name,
struct aa_profile *profile,
const struct file_operations *fops)
{
struct aa_proxy *proxy = aa_get_proxy(profile->label.proxy);
struct dentry *dent;
dent = aafs_create_file(name, S_IFREG | 0444, dir, proxy, fops);
if (IS_ERR(dent))
aa_put_proxy(proxy);
return dent;
}
#ifdef CONFIG_SECURITY_APPARMOR_EXPORT_BINARY
static int profile_depth(struct aa_profile *profile)
{
int depth = 0;
rcu_read_lock();
for (depth = 0; profile; profile = rcu_access_pointer(profile->parent))
depth++;
rcu_read_unlock();
return depth;
}
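/*
 * gen_symlink_name - build the relative target for a profile's raw_* links
 *
 * Profile directories are nested one "profiles/<name>/" level per parent,
 * so the target climbs back with one "../../" per level of depth and then
 * descends into the namespace's raw_data tree, producing strings such as
 * "../../raw_data/<revision>/sha1".  The buffer is freed later by
 * rawdata_link_cb() when the delayed call set up by the get_link handlers
 * fires.
 */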
static char *gen_symlink_name(int depth, const char *dirname, const char *fname)
{
char *buffer, *s;
int error;
int size = depth * 6 + strlen(dirname) + strlen(fname) + 11;
s = buffer = kmalloc(size, GFP_KERNEL);
if (!buffer)
return ERR_PTR(-ENOMEM);
for (; depth > 0; depth--) {
strcpy(s, "../../");
s += 6;
size -= 6;
}
error = snprintf(s, size, "raw_data/%s/%s", dirname, fname);
if (error >= size || error < 0) {
kfree(buffer);
return ERR_PTR(-ENAMETOOLONG);
}
return buffer;
}
static void rawdata_link_cb(void *arg)
{
kfree(arg);
}
static const char *rawdata_get_link_base(struct dentry *dentry,
struct inode *inode,
struct delayed_call *done,
const char *name)
{
struct aa_proxy *proxy = inode->i_private;
struct aa_label *label;
struct aa_profile *profile;
char *target;
int depth;
if (!dentry)
return ERR_PTR(-ECHILD);
label = aa_get_label_rcu(&proxy->label);
profile = labels_profile(label);
depth = profile_depth(profile);
target = gen_symlink_name(depth, profile->rawdata->name, name);
aa_put_label(label);
if (IS_ERR(target))
return target;
set_delayed_call(done, rawdata_link_cb, target);
return target;
}
static const char *rawdata_get_link_sha1(struct dentry *dentry,
struct inode *inode,
struct delayed_call *done)
{
return rawdata_get_link_base(dentry, inode, done, "sha1");
}
static const char *rawdata_get_link_abi(struct dentry *dentry,
struct inode *inode,
struct delayed_call *done)
{
return rawdata_get_link_base(dentry, inode, done, "abi");
}
static const char *rawdata_get_link_data(struct dentry *dentry,
struct inode *inode,
struct delayed_call *done)
{
return rawdata_get_link_base(dentry, inode, done, "raw_data");
}
static const struct inode_operations rawdata_link_sha1_iops = {
.get_link = rawdata_get_link_sha1,
};
static const struct inode_operations rawdata_link_abi_iops = {
.get_link = rawdata_get_link_abi,
};
static const struct inode_operations rawdata_link_data_iops = {
.get_link = rawdata_get_link_data,
};
#endif /* CONFIG_SECURITY_APPARMOR_EXPORT_BINARY */
/*
* Requires: @profile->ns->lock held
*/
int __aafs_profile_mkdir(struct aa_profile *profile, struct dentry *parent)
{
struct aa_profile *child;
struct dentry *dent = NULL, *dir;
int error;
AA_BUG(!profile);
AA_BUG(!mutex_is_locked(&profiles_ns(profile)->lock));
if (!parent) {
struct aa_profile *p;
p = aa_deref_parent(profile);
dent = prof_dir(p);
/* adding to parent that previously didn't have children */
dent = aafs_create_dir("profiles", dent);
if (IS_ERR(dent))
goto fail;
prof_child_dir(p) = parent = dent;
}
if (!profile->dirname) {
int len, id_len;
len = mangle_name(profile->base.name, NULL);
id_len = snprintf(NULL, 0, ".%ld", profile->ns->uniq_id);
profile->dirname = kmalloc(len + id_len + 1, GFP_KERNEL);
if (!profile->dirname) {
error = -ENOMEM;
goto fail2;
}
mangle_name(profile->base.name, profile->dirname);
sprintf(profile->dirname + len, ".%ld", profile->ns->uniq_id++);
}
dent = aafs_create_dir(profile->dirname, parent);
if (IS_ERR(dent))
goto fail;
prof_dir(profile) = dir = dent;
dent = create_profile_file(dir, "name", profile,
&seq_profile_name_fops);
if (IS_ERR(dent))
goto fail;
profile->dents[AAFS_PROF_NAME] = dent;
dent = create_profile_file(dir, "mode", profile,
&seq_profile_mode_fops);
if (IS_ERR(dent))
goto fail;
profile->dents[AAFS_PROF_MODE] = dent;
dent = create_profile_file(dir, "attach", profile,
&seq_profile_attach_fops);
if (IS_ERR(dent))
goto fail;
profile->dents[AAFS_PROF_ATTACH] = dent;
if (profile->hash) {
dent = create_profile_file(dir, "sha1", profile,
&seq_profile_hash_fops);
if (IS_ERR(dent))
goto fail;
profile->dents[AAFS_PROF_HASH] = dent;
}
#ifdef CONFIG_SECURITY_APPARMOR_EXPORT_BINARY
if (profile->rawdata) {
if (aa_g_hash_policy) {
dent = aafs_create("raw_sha1", S_IFLNK | 0444, dir,
profile->label.proxy, NULL, NULL,
&rawdata_link_sha1_iops);
if (IS_ERR(dent))
goto fail;
aa_get_proxy(profile->label.proxy);
profile->dents[AAFS_PROF_RAW_HASH] = dent;
}
dent = aafs_create("raw_abi", S_IFLNK | 0444, dir,
profile->label.proxy, NULL, NULL,
&rawdata_link_abi_iops);
if (IS_ERR(dent))
goto fail;
aa_get_proxy(profile->label.proxy);
profile->dents[AAFS_PROF_RAW_ABI] = dent;
dent = aafs_create("raw_data", S_IFLNK | 0444, dir,
profile->label.proxy, NULL, NULL,
&rawdata_link_data_iops);
if (IS_ERR(dent))
goto fail;
aa_get_proxy(profile->label.proxy);
profile->dents[AAFS_PROF_RAW_DATA] = dent;
}
#endif /* CONFIG_SECURITY_APPARMOR_EXPORT_BINARY */
list_for_each_entry(child, &profile->base.profiles, base.list) {
error = __aafs_profile_mkdir(child, prof_child_dir(profile));
if (error)
goto fail2;
}
return 0;
fail:
error = PTR_ERR(dent);
fail2:
__aafs_profile_rmdir(profile);
return error;
}
static int ns_mkdir_op(struct mnt_idmap *idmap, struct inode *dir,
struct dentry *dentry, umode_t mode)
{
struct aa_ns *ns, *parent;
/* TODO: improve permission check */
struct aa_label *label;
int error;
label = begin_current_label_crit_section();
error = aa_may_manage_policy(label, NULL, AA_MAY_LOAD_POLICY);
end_current_label_crit_section(label);
if (error)
return error;
parent = aa_get_ns(dir->i_private);
AA_BUG(d_inode(ns_subns_dir(parent)) != dir);
/* we have to unlock and then relock to get locking order right
* for pin_fs
*/
inode_unlock(dir);
error = simple_pin_fs(&aafs_ops, &aafs_mnt, &aafs_count);
mutex_lock_nested(&parent->lock, parent->level);
inode_lock_nested(dir, I_MUTEX_PARENT);
if (error)
goto out;
error = __aafs_setup_d_inode(dir, dentry, mode | S_IFDIR, NULL,
NULL, NULL, NULL);
if (error)
goto out_pin;
ns = __aa_find_or_create_ns(parent, READ_ONCE(dentry->d_name.name),
dentry);
if (IS_ERR(ns)) {
error = PTR_ERR(ns);
ns = NULL;
}
aa_put_ns(ns); /* list ref remains */
out_pin:
if (error)
simple_release_fs(&aafs_mnt, &aafs_count);
out:
mutex_unlock(&parent->lock);
aa_put_ns(parent);
return error;
}
static int ns_rmdir_op(struct inode *dir, struct dentry *dentry)
{
struct aa_ns *ns, *parent;
/* TODO: improve permission check */
struct aa_label *label;
int error;
label = begin_current_label_crit_section();
error = aa_may_manage_policy(label, NULL, AA_MAY_LOAD_POLICY);
end_current_label_crit_section(label);
if (error)
return error;
parent = aa_get_ns(dir->i_private);
/* rmdir calls the generic securityfs functions to remove files
* from the apparmor dir. It is up to the apparmor ns locking
* to avoid races.
*/
inode_unlock(dir);
inode_unlock(dentry->d_inode);
mutex_lock_nested(&parent->lock, parent->level);
ns = aa_get_ns(__aa_findn_ns(&parent->sub_ns, dentry->d_name.name,
dentry->d_name.len));
if (!ns) {
error = -ENOENT;
goto out;
}
AA_BUG(ns_dir(ns) != dentry);
__aa_remove_ns(ns);
aa_put_ns(ns);
out:
mutex_unlock(&parent->lock);
inode_lock_nested(dir, I_MUTEX_PARENT);
inode_lock(dentry->d_inode);
aa_put_ns(parent);
return error;
}
static const struct inode_operations ns_dir_inode_operations = {
.lookup = simple_lookup,
.mkdir = ns_mkdir_op,
.rmdir = ns_rmdir_op,
};
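/*
 * Illustrative userspace sketch (under #if 0, not built here): creating and
 * removing a directory inside policy/namespaces/ goes through ns_mkdir_op()
 * and ns_rmdir_op() above and so creates/destroys a policy sub-namespace.
 * The mount point and the namespace name "mycontainer" are placeholders;
 * the caller needs permission to manage policy (AA_MAY_LOAD_POLICY),
 * normally root.
 */
#if 0
#include <stdio.h>
#include <sys/stat.h>
#include <unistd.h>

#define NS_DIR "/sys/kernel/security/apparmor/policy/namespaces/mycontainer"

int main(void)
{
	if (mkdir(NS_DIR, 0755)) {		/* invokes ns_mkdir_op() */
		perror("mkdir");
		return 1;
	}
	puts("created " NS_DIR);
	if (rmdir(NS_DIR))			/* invokes ns_rmdir_op() */
		perror("rmdir");
	return 0;
}
#endif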
static void __aa_fs_list_remove_rawdata(struct aa_ns *ns)
{
struct aa_loaddata *ent, *tmp;
AA_BUG(!mutex_is_locked(&ns->lock));
list_for_each_entry_safe(ent, tmp, &ns->rawdata_list, list)
__aa_fs_remove_rawdata(ent);
}
/*
*
* Requires: @ns->lock held
*/
void __aafs_ns_rmdir(struct aa_ns *ns)
{
struct aa_ns *sub;
struct aa_profile *child;
int i;
if (!ns)
return;
AA_BUG(!mutex_is_locked(&ns->lock));
list_for_each_entry(child, &ns->base.profiles, base.list)
__aafs_profile_rmdir(child);
list_for_each_entry(sub, &ns->sub_ns, base.list) {
mutex_lock_nested(&sub->lock, sub->level);
__aafs_ns_rmdir(sub);
mutex_unlock(&sub->lock);
}
__aa_fs_list_remove_rawdata(ns);
if (ns_subns_dir(ns)) {
sub = d_inode(ns_subns_dir(ns))->i_private;
aa_put_ns(sub);
}
if (ns_subload(ns)) {
sub = d_inode(ns_subload(ns))->i_private;
aa_put_ns(sub);
}
if (ns_subreplace(ns)) {
sub = d_inode(ns_subreplace(ns))->i_private;
aa_put_ns(sub);
}
if (ns_subremove(ns)) {
sub = d_inode(ns_subremove(ns))->i_private;
aa_put_ns(sub);
}
if (ns_subrevision(ns)) {
sub = d_inode(ns_subrevision(ns))->i_private;
aa_put_ns(sub);
}
for (i = AAFS_NS_SIZEOF - 1; i >= 0; --i) {
aafs_remove(ns->dents[i]);
ns->dents[i] = NULL;
}
}
/* assumes cleanup in caller */
static int __aafs_ns_mkdir_entries(struct aa_ns *ns, struct dentry *dir)
{
struct dentry *dent;
AA_BUG(!ns);
AA_BUG(!dir);
dent = aafs_create_dir("profiles", dir);
if (IS_ERR(dent))
return PTR_ERR(dent);
ns_subprofs_dir(ns) = dent;
dent = aafs_create_dir("raw_data", dir);
if (IS_ERR(dent))
return PTR_ERR(dent);
ns_subdata_dir(ns) = dent;
dent = aafs_create_file("revision", 0444, dir, ns,
&aa_fs_ns_revision_fops);
if (IS_ERR(dent))
return PTR_ERR(dent);
aa_get_ns(ns);
ns_subrevision(ns) = dent;
dent = aafs_create_file(".load", 0640, dir, ns,
&aa_fs_profile_load);
if (IS_ERR(dent))
return PTR_ERR(dent);
aa_get_ns(ns);
ns_subload(ns) = dent;
dent = aafs_create_file(".replace", 0640, dir, ns,
&aa_fs_profile_replace);
if (IS_ERR(dent))
return PTR_ERR(dent);
aa_get_ns(ns);
ns_subreplace(ns) = dent;
dent = aafs_create_file(".remove", 0640, dir, ns,
&aa_fs_profile_remove);
if (IS_ERR(dent))
return PTR_ERR(dent);
aa_get_ns(ns);
ns_subremove(ns) = dent;
/* use create_dentry so we can supply private data */
dent = aafs_create("namespaces", S_IFDIR | 0755, dir, ns, NULL, NULL,
&ns_dir_inode_operations);
if (IS_ERR(dent))
return PTR_ERR(dent);
aa_get_ns(ns);
ns_subns_dir(ns) = dent;
return 0;
}
/*
* Requires: @ns->lock held
*/
int __aafs_ns_mkdir(struct aa_ns *ns, struct dentry *parent, const char *name,
struct dentry *dent)
{
struct aa_ns *sub;
struct aa_profile *child;
struct dentry *dir;
int error;
AA_BUG(!ns);
AA_BUG(!parent);
AA_BUG(!mutex_is_locked(&ns->lock));
if (!name)
name = ns->base.name;
if (!dent) {
/* create ns dir if it doesn't already exist */
dent = aafs_create_dir(name, parent);
if (IS_ERR(dent))
goto fail;
} else
dget(dent);
ns_dir(ns) = dir = dent;
error = __aafs_ns_mkdir_entries(ns, dir);
if (error)
goto fail2;
/* profiles */
list_for_each_entry(child, &ns->base.profiles, base.list) {
error = __aafs_profile_mkdir(child, ns_subprofs_dir(ns));
if (error)
goto fail2;
}
/* subnamespaces */
list_for_each_entry(sub, &ns->sub_ns, base.list) {
mutex_lock_nested(&sub->lock, sub->level);
error = __aafs_ns_mkdir(sub, ns_subns_dir(ns), NULL, NULL);
mutex_unlock(&sub->lock);
if (error)
goto fail2;
}
return 0;
fail:
error = PTR_ERR(dent);
fail2:
__aafs_ns_rmdir(ns);
return error;
}
/**
* __next_ns - find the next namespace to list
* @root: root namespace to stop search at (NOT NULL)
* @ns: current ns position (NOT NULL)
*
* Find the next namespace from @ns under @root and handle all locking needed
* while switching current namespace.
*
* Returns: next namespace or NULL if at last namespace under @root
* Requires: ns->parent->lock to be held
* NOTE: will not unlock root->lock
*/
static struct aa_ns *__next_ns(struct aa_ns *root, struct aa_ns *ns)
{
struct aa_ns *parent, *next;
AA_BUG(!root);
AA_BUG(!ns);
AA_BUG(ns != root && !mutex_is_locked(&ns->parent->lock));
/* is next namespace a child */
if (!list_empty(&ns->sub_ns)) {
next = list_first_entry(&ns->sub_ns, typeof(*ns), base.list);
mutex_lock_nested(&next->lock, next->level);
return next;
}
/* check if the next ns is a sibling, parent, gp, .. */
parent = ns->parent;
while (ns != root) {
mutex_unlock(&ns->lock);
next = list_next_entry(ns, base.list);
if (!list_entry_is_head(next, &parent->sub_ns, base.list)) {
mutex_lock_nested(&next->lock, next->level);
return next;
}
ns = parent;
parent = parent->parent;
}
return NULL;
}
/**
* __first_profile - find the first profile in a namespace
* @root: namespace that is root of profiles being displayed (NOT NULL)
* @ns: namespace to start in (NOT NULL)
*
* Returns: unrefcounted profile or NULL if no profile
* Requires: profile->ns.lock to be held
*/
static struct aa_profile *__first_profile(struct aa_ns *root,
struct aa_ns *ns)
{
AA_BUG(!root);
AA_BUG(ns && !mutex_is_locked(&ns->lock));
for (; ns; ns = __next_ns(root, ns)) {
if (!list_empty(&ns->base.profiles))
return list_first_entry(&ns->base.profiles,
struct aa_profile, base.list);
}
return NULL;
}
/**
* __next_profile - step to the next profile in a profile tree
* @p: current profile in tree (NOT NULL)
*
* Perform a depth first traversal on the profile tree in a namespace
*
* Returns: next profile or NULL if done
* Requires: profile->ns.lock to be held
*/
static struct aa_profile *__next_profile(struct aa_profile *p)
{
struct aa_profile *parent;
struct aa_ns *ns = p->ns;
AA_BUG(!mutex_is_locked(&profiles_ns(p)->lock));
/* is next profile a child */
if (!list_empty(&p->base.profiles))
return list_first_entry(&p->base.profiles, typeof(*p),
base.list);
/* is next profile a sibling, parent sibling, gp, sibling, .. */
parent = rcu_dereference_protected(p->parent,
mutex_is_locked(&p->ns->lock));
while (parent) {
p = list_next_entry(p, base.list);
if (!list_entry_is_head(p, &parent->base.profiles, base.list))
return p;
p = parent;
parent = rcu_dereference_protected(parent->parent,
mutex_is_locked(&parent->ns->lock));
}
/* is next another profile in the namespace */
p = list_next_entry(p, base.list);
if (!list_entry_is_head(p, &ns->base.profiles, base.list))
return p;
return NULL;
}
/**
* next_profile - step to the next profile, wherever it may be
* @root: root namespace (NOT NULL)
* @profile: current profile (NOT NULL)
*
* Returns: next profile or NULL if there isn't one
*/
static struct aa_profile *next_profile(struct aa_ns *root,
struct aa_profile *profile)
{
struct aa_profile *next = __next_profile(profile);
if (next)
return next;
/* finished all profiles in namespace move to next namespace */
return __first_profile(root, __next_ns(root, profile->ns));
}
/**
* p_start - start a depth first traversal of profile tree
* @f: seq_file to fill
* @pos: current position
*
* Returns: first profile under current namespace or NULL if none found
*
* acquires first ns->lock
*/
static void *p_start(struct seq_file *f, loff_t *pos)
{
struct aa_profile *profile = NULL;
struct aa_ns *root = aa_get_current_ns();
loff_t l = *pos;
f->private = root;
/* find the first profile */
mutex_lock_nested(&root->lock, root->level);
profile = __first_profile(root, root);
/* skip to position */
for (; profile && l > 0; l--)
profile = next_profile(root, profile);
return profile;
}
/**
* p_next - read the next profile entry
* @f: seq_file to fill
* @p: profile previously returned
* @pos: current position
*
* Returns: next profile after @p or NULL if none
*
* may acquire/release locks in namespace tree as necessary
*/
static void *p_next(struct seq_file *f, void *p, loff_t *pos)
{
struct aa_profile *profile = p;
struct aa_ns *ns = f->private;
(*pos)++;
return next_profile(ns, profile);
}
/**
* p_stop - stop depth first traversal
* @f: seq_file we are filling
* @p: the last profile written
*
* Release all locking done by p_start/p_next on namespace tree
*/
static void p_stop(struct seq_file *f, void *p)
{
struct aa_profile *profile = p;
struct aa_ns *root = f->private, *ns;
if (profile) {
for (ns = profile->ns; ns && ns != root; ns = ns->parent)
mutex_unlock(&ns->lock);
}
mutex_unlock(&root->lock);
aa_put_ns(root);
}
/**
* seq_show_profile - show a profile entry
* @f: seq_file to fill
* @p: current position (profile) (NOT NULL)
*
* Returns: error on failure
*/
static int seq_show_profile(struct seq_file *f, void *p)
{
struct aa_profile *profile = (struct aa_profile *)p;
struct aa_ns *root = f->private;
aa_label_seq_xprint(f, root, &profile->label,
FLAG_SHOW_MODE | FLAG_VIEW_SUBNS, GFP_KERNEL);
seq_putc(f, '\n');
return 0;
}
static const struct seq_operations aa_sfs_profiles_op = {
.start = p_start,
.next = p_next,
.stop = p_stop,
.show = seq_show_profile,
};
static int profiles_open(struct inode *inode, struct file *file)
{
if (!aa_current_policy_view_capable(NULL))
return -EACCES;
return seq_open(file, &aa_sfs_profiles_op);
}
static int profiles_release(struct inode *inode, struct file *file)
{
return seq_release(inode, file);
}
static const struct file_operations aa_sfs_profiles_fops = {
.open = profiles_open,
.read = seq_read,
.llseek = seq_lseek,
.release = profiles_release,
};
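/*
 * Illustrative userspace sketch (under #if 0, not built here): the
 * p_start()/p_next()/p_stop()/seq_show_profile() iterator above backs the
 * "profiles" file, which lists every profile visible from the caller's
 * namespace, one "<name> (<mode>)" entry per line.  The securityfs mount
 * point is an assumption, and profiles_open() requires the policy view
 * capability, so in practice this is run as root.
 */
#if 0
#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/sys/kernel/security/apparmor/profiles", "r");
	char line[1024];

	if (!f) {
		perror("profiles");
		return 1;
	}
	while (fgets(line, sizeof(line), f))
		fputs(line, stdout);
	fclose(f);
	return 0;
}
#endif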
/** Base file system setup **/
static struct aa_sfs_entry aa_sfs_entry_file[] = {
AA_SFS_FILE_STRING("mask",
"create read write exec append mmap_exec link lock"),
{ }
};
static struct aa_sfs_entry aa_sfs_entry_ptrace[] = {
AA_SFS_FILE_STRING("mask", "read trace"),
{ }
};
static struct aa_sfs_entry aa_sfs_entry_signal[] = {
AA_SFS_FILE_STRING("mask", AA_SFS_SIG_MASK),
{ }
};
static struct aa_sfs_entry aa_sfs_entry_attach[] = {
AA_SFS_FILE_BOOLEAN("xattr", 1),
{ }
};
static struct aa_sfs_entry aa_sfs_entry_domain[] = {
AA_SFS_FILE_BOOLEAN("change_hat", 1),
AA_SFS_FILE_BOOLEAN("change_hatv", 1),
AA_SFS_FILE_BOOLEAN("change_onexec", 1),
AA_SFS_FILE_BOOLEAN("change_profile", 1),
AA_SFS_FILE_BOOLEAN("stack", 1),
AA_SFS_FILE_BOOLEAN("fix_binfmt_elf_mmap", 1),
AA_SFS_FILE_BOOLEAN("post_nnp_subset", 1),
AA_SFS_FILE_BOOLEAN("computed_longest_left", 1),
AA_SFS_DIR("attach_conditions", aa_sfs_entry_attach),
AA_SFS_FILE_STRING("version", "1.2"),
{ }
};
static struct aa_sfs_entry aa_sfs_entry_versions[] = {
AA_SFS_FILE_BOOLEAN("v5", 1),
AA_SFS_FILE_BOOLEAN("v6", 1),
AA_SFS_FILE_BOOLEAN("v7", 1),
AA_SFS_FILE_BOOLEAN("v8", 1),
AA_SFS_FILE_BOOLEAN("v9", 1),
{ }
};
static struct aa_sfs_entry aa_sfs_entry_policy[] = {
AA_SFS_DIR("versions", aa_sfs_entry_versions),
AA_SFS_FILE_BOOLEAN("set_load", 1),
/* number of out of band transitions supported */
AA_SFS_FILE_U64("outofband", MAX_OOB_SUPPORTED),
{ }
};
static struct aa_sfs_entry aa_sfs_entry_mount[] = {
AA_SFS_FILE_STRING("mask", "mount umount pivot_root"),
{ }
};
static struct aa_sfs_entry aa_sfs_entry_ns[] = {
AA_SFS_FILE_BOOLEAN("profile", 1),
AA_SFS_FILE_BOOLEAN("pivot_root", 0),
{ }
};
static struct aa_sfs_entry aa_sfs_entry_query_label[] = {
AA_SFS_FILE_STRING("perms", "allow deny audit quiet"),
AA_SFS_FILE_BOOLEAN("data", 1),
AA_SFS_FILE_BOOLEAN("multi_transaction", 1),
{ }
};
static struct aa_sfs_entry aa_sfs_entry_query[] = {
AA_SFS_DIR("label", aa_sfs_entry_query_label),
{ }
};
static struct aa_sfs_entry aa_sfs_entry_features[] = {
AA_SFS_DIR("policy", aa_sfs_entry_policy),
AA_SFS_DIR("domain", aa_sfs_entry_domain),
AA_SFS_DIR("file", aa_sfs_entry_file),
AA_SFS_DIR("network_v8", aa_sfs_entry_network),
AA_SFS_DIR("mount", aa_sfs_entry_mount),
AA_SFS_DIR("namespaces", aa_sfs_entry_ns),
AA_SFS_FILE_U64("capability", VFS_CAP_FLAGS_MASK),
AA_SFS_DIR("rlimit", aa_sfs_entry_rlimit),
AA_SFS_DIR("caps", aa_sfs_entry_caps),
AA_SFS_DIR("ptrace", aa_sfs_entry_ptrace),
AA_SFS_DIR("signal", aa_sfs_entry_signal),
AA_SFS_DIR("query", aa_sfs_entry_query),
{ }
};
static struct aa_sfs_entry aa_sfs_entry_apparmor[] = {
AA_SFS_FILE_FOPS(".access", 0666, &aa_sfs_access),
AA_SFS_FILE_FOPS(".stacked", 0444, &seq_ns_stacked_fops),
AA_SFS_FILE_FOPS(".ns_stacked", 0444, &seq_ns_nsstacked_fops),
AA_SFS_FILE_FOPS(".ns_level", 0444, &seq_ns_level_fops),
AA_SFS_FILE_FOPS(".ns_name", 0444, &seq_ns_name_fops),
AA_SFS_FILE_FOPS("profiles", 0444, &aa_sfs_profiles_fops),
AA_SFS_FILE_FOPS("raw_data_compression_level_min", 0444, &seq_ns_compress_min_fops),
AA_SFS_FILE_FOPS("raw_data_compression_level_max", 0444, &seq_ns_compress_max_fops),
AA_SFS_DIR("features", aa_sfs_entry_features),
{ }
};
static struct aa_sfs_entry aa_sfs_entry =
AA_SFS_DIR("apparmor", aa_sfs_entry_apparmor);
/**
* entry_create_file - create a file entry in the apparmor securityfs
* @fs_file: aa_sfs_entry to build an entry for (NOT NULL)
* @parent: the parent dentry in the securityfs
*
* Use entry_remove_file to remove entries created with this fn.
*/
static int __init entry_create_file(struct aa_sfs_entry *fs_file,
struct dentry *parent)
{
int error = 0;
fs_file->dentry = securityfs_create_file(fs_file->name,
S_IFREG | fs_file->mode,
parent, fs_file,
fs_file->file_ops);
if (IS_ERR(fs_file->dentry)) {
error = PTR_ERR(fs_file->dentry);
fs_file->dentry = NULL;
}
return error;
}
static void __init entry_remove_dir(struct aa_sfs_entry *fs_dir);
/**
* entry_create_dir - recursively create a directory entry in the securityfs
* @fs_dir: aa_sfs_entry (and all child entries) to build (NOT NULL)
* @parent: the parent dentry in the securityfs
*
* Use entry_remove_dir to remove entries created with this fn.
*/
static int __init entry_create_dir(struct aa_sfs_entry *fs_dir,
struct dentry *parent)
{
struct aa_sfs_entry *fs_file;
struct dentry *dir;
int error;
dir = securityfs_create_dir(fs_dir->name, parent);
if (IS_ERR(dir))
return PTR_ERR(dir);
fs_dir->dentry = dir;
for (fs_file = fs_dir->v.files; fs_file && fs_file->name; ++fs_file) {
if (fs_file->v_type == AA_SFS_TYPE_DIR)
error = entry_create_dir(fs_file, fs_dir->dentry);
else
error = entry_create_file(fs_file, fs_dir->dentry);
if (error)
goto failed;
}
return 0;
failed:
entry_remove_dir(fs_dir);
return error;
}
/**
* entry_remove_file - drop a single file entry in the apparmor securityfs
* @fs_file: aa_sfs_entry to detach from the securityfs (NOT NULL)
*/
static void __init entry_remove_file(struct aa_sfs_entry *fs_file)
{
if (!fs_file->dentry)
return;
securityfs_remove(fs_file->dentry);
fs_file->dentry = NULL;
}
/**
* entry_remove_dir - recursively drop a directory entry from the securityfs
* @fs_dir: aa_sfs_entry (and all child entries) to detach (NOT NULL)
*/
static void __init entry_remove_dir(struct aa_sfs_entry *fs_dir)
{
struct aa_sfs_entry *fs_file;
for (fs_file = fs_dir->v.files; fs_file && fs_file->name; ++fs_file) {
if (fs_file->v_type == AA_SFS_TYPE_DIR)
entry_remove_dir(fs_file);
else
entry_remove_file(fs_file);
}
entry_remove_file(fs_dir);
}
/**
* aa_destroy_aafs - cleanup and free aafs
*
* releases dentries allocated by aa_create_aafs
*/
void __init aa_destroy_aafs(void)
{
entry_remove_dir(&aa_sfs_entry);
}
#define NULL_FILE_NAME ".null"
struct path aa_null;
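/*
 * aa_mk_null_file - create the special ".null" character device node
 *
 * The node is a /dev/null equivalent (MEM_MAJOR, minor 3) living inside
 * apparmorfs.  During exec transitions aa_inherit_files() replaces inherited
 * file descriptors that the new label may not keep with this file, and
 * common_file_perm() refuses further access to anything already redirected
 * here so the revocation is not re-audited.
 */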
static int aa_mk_null_file(struct dentry *parent)
{
struct vfsmount *mount = NULL;
struct dentry *dentry;
struct inode *inode;
int count = 0;
int error = simple_pin_fs(parent->d_sb->s_type, &mount, &count);
if (error)
return error;
inode_lock(d_inode(parent));
dentry = lookup_one_len(NULL_FILE_NAME, parent, strlen(NULL_FILE_NAME));
if (IS_ERR(dentry)) {
error = PTR_ERR(dentry);
goto out;
}
inode = new_inode(parent->d_inode->i_sb);
if (!inode) {
error = -ENOMEM;
goto out1;
}
inode->i_ino = get_next_ino();
inode->i_mode = S_IFCHR | S_IRUGO | S_IWUGO;
inode->i_atime = inode->i_mtime = inode_set_ctime_current(inode);
init_special_inode(inode, S_IFCHR | S_IRUGO | S_IWUGO,
MKDEV(MEM_MAJOR, 3));
d_instantiate(dentry, inode);
aa_null.dentry = dget(dentry);
aa_null.mnt = mntget(mount);
error = 0;
out1:
dput(dentry);
out:
inode_unlock(d_inode(parent));
simple_release_fs(&mount, &count);
return error;
}
static const char *policy_get_link(struct dentry *dentry,
struct inode *inode,
struct delayed_call *done)
{
struct aa_ns *ns;
struct path path;
int error;
if (!dentry)
return ERR_PTR(-ECHILD);
ns = aa_get_current_ns();
path.mnt = mntget(aafs_mnt);
path.dentry = dget(ns_dir(ns));
error = nd_jump_link(&path);
aa_put_ns(ns);
return ERR_PTR(error);
}
static int policy_readlink(struct dentry *dentry, char __user *buffer,
int buflen)
{
char name[32];
int res;
res = snprintf(name, sizeof(name), "%s:[%lu]", AAFS_NAME,
d_inode(dentry)->i_ino);
if (res > 0 && res < sizeof(name))
res = readlink_copy(buffer, buflen, name);
else
res = -ENOENT;
return res;
}
static const struct inode_operations policy_link_iops = {
.readlink = policy_readlink,
.get_link = policy_get_link,
};
/**
* aa_create_aafs - create the apparmor security filesystem
*
* dentries created here are released by aa_destroy_aafs
*
* Returns: error on failure
*/
static int __init aa_create_aafs(void)
{
struct dentry *dent;
int error;
if (!apparmor_initialized)
return 0;
if (aa_sfs_entry.dentry) {
AA_ERROR("%s: AppArmor securityfs already exists\n", __func__);
return -EEXIST;
}
/* setup apparmorfs used to virtualize policy/ */
aafs_mnt = kern_mount(&aafs_ops);
if (IS_ERR(aafs_mnt))
panic("can't set apparmorfs up\n");
aafs_mnt->mnt_sb->s_flags &= ~SB_NOUSER;
/* Populate fs tree. */
error = entry_create_dir(&aa_sfs_entry, NULL);
if (error)
goto error;
dent = securityfs_create_file(".load", 0666, aa_sfs_entry.dentry,
NULL, &aa_fs_profile_load);
if (IS_ERR(dent))
goto dent_error;
ns_subload(root_ns) = dent;
dent = securityfs_create_file(".replace", 0666, aa_sfs_entry.dentry,
NULL, &aa_fs_profile_replace);
if (IS_ERR(dent))
goto dent_error;
ns_subreplace(root_ns) = dent;
dent = securityfs_create_file(".remove", 0666, aa_sfs_entry.dentry,
NULL, &aa_fs_profile_remove);
if (IS_ERR(dent))
goto dent_error;
ns_subremove(root_ns) = dent;
dent = securityfs_create_file("revision", 0444, aa_sfs_entry.dentry,
NULL, &aa_fs_ns_revision_fops);
if (IS_ERR(dent))
goto dent_error;
ns_subrevision(root_ns) = dent;
/* policy tree referenced by magic policy symlink */
mutex_lock_nested(&root_ns->lock, root_ns->level);
error = __aafs_ns_mkdir(root_ns, aafs_mnt->mnt_root, ".policy",
aafs_mnt->mnt_root);
mutex_unlock(&root_ns->lock);
if (error)
goto error;
/* magic symlink similar to nsfs redirects based on task policy */
dent = securityfs_create_symlink("policy", aa_sfs_entry.dentry,
NULL, &policy_link_iops);
if (IS_ERR(dent))
goto dent_error;
error = aa_mk_null_file(aa_sfs_entry.dentry);
if (error)
goto error;
/* TODO: add default profile to apparmorfs */
/* Report that AppArmor fs is enabled */
aa_info_message("AppArmor Filesystem Enabled");
return 0;
dent_error:
error = PTR_ERR(dent);
error:
aa_destroy_aafs();
AA_ERROR("Error creating AppArmor securityfs\n");
return error;
}
fs_initcall(aa_create_aafs);
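/*
 * Illustrative userspace sketch (under #if 0, not built here): the "policy"
 * entry created above is a magic symlink that, like nsfs, jumps into the
 * apparmorfs view of the caller's current policy namespace.  Listing it
 * shows the per-namespace entries set up by __aafs_ns_mkdir_entries()
 * (.load, .replace, .remove, revision, profiles, raw_data, namespaces).
 * The /sys/kernel/security mount point is an assumption.
 */
#if 0
#include <stdio.h>
#include <dirent.h>

int main(void)
{
	DIR *d = opendir("/sys/kernel/security/apparmor/policy");
	struct dirent *ent;

	if (!d) {
		perror("policy");
		return 1;
	}
	while ((ent = readdir(d)))
		puts(ent->d_name);
	closedir(d);
	return 0;
}
#endif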
| linux-master | security/apparmor/apparmorfs.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* AppArmor security module
*
* This file contains AppArmor LSM hooks.
*
* Copyright (C) 1998-2008 Novell/SUSE
* Copyright 2009-2010 Canonical Ltd.
*/
#include <linux/lsm_hooks.h>
#include <linux/moduleparam.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/mount.h>
#include <linux/namei.h>
#include <linux/ptrace.h>
#include <linux/ctype.h>
#include <linux/sysctl.h>
#include <linux/audit.h>
#include <linux/user_namespace.h>
#include <linux/netfilter_ipv4.h>
#include <linux/netfilter_ipv6.h>
#include <linux/zstd.h>
#include <net/sock.h>
#include <uapi/linux/mount.h>
#include "include/apparmor.h"
#include "include/apparmorfs.h"
#include "include/audit.h"
#include "include/capability.h"
#include "include/cred.h"
#include "include/file.h"
#include "include/ipc.h"
#include "include/net.h"
#include "include/path.h"
#include "include/label.h"
#include "include/policy.h"
#include "include/policy_ns.h"
#include "include/procattr.h"
#include "include/mount.h"
#include "include/secid.h"
/* Flag indicating whether initialization completed */
int apparmor_initialized;
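/*
 * Pool of pre-allocated path-name buffers shared by the LSM hooks below.
 * Free buffers are threaded onto aa_global_buffers, reusing their first
 * bytes as the list_head through the union, and reserve_count buffers are
 * held back so callers running in atomic context can still obtain one
 * without allocating.
 */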
union aa_buffer {
struct list_head list;
DECLARE_FLEX_ARRAY(char, buffer);
};
#define RESERVE_COUNT 2
static int reserve_count = RESERVE_COUNT;
static int buffer_count;
static LIST_HEAD(aa_global_buffers);
static DEFINE_SPINLOCK(aa_buffers_lock);
/*
* LSM hook functions
*/
/*
* put the associated labels
*/
static void apparmor_cred_free(struct cred *cred)
{
aa_put_label(cred_label(cred));
set_cred_label(cred, NULL);
}
/*
* allocate the apparmor part of blank credentials
*/
static int apparmor_cred_alloc_blank(struct cred *cred, gfp_t gfp)
{
set_cred_label(cred, NULL);
return 0;
}
/*
* prepare new cred label for modification by prepare_cred block
*/
static int apparmor_cred_prepare(struct cred *new, const struct cred *old,
gfp_t gfp)
{
set_cred_label(new, aa_get_newest_label(cred_label(old)));
return 0;
}
/*
* transfer the apparmor data to a blank set of creds
*/
static void apparmor_cred_transfer(struct cred *new, const struct cred *old)
{
set_cred_label(new, aa_get_newest_label(cred_label(old)));
}
static void apparmor_task_free(struct task_struct *task)
{
aa_free_task_ctx(task_ctx(task));
}
static int apparmor_task_alloc(struct task_struct *task,
unsigned long clone_flags)
{
struct aa_task_ctx *new = task_ctx(task);
aa_dup_task_ctx(new, task_ctx(current));
return 0;
}
static int apparmor_ptrace_access_check(struct task_struct *child,
unsigned int mode)
{
struct aa_label *tracer, *tracee;
int error;
tracer = __begin_current_label_crit_section();
tracee = aa_get_task_label(child);
error = aa_may_ptrace(tracer, tracee,
(mode & PTRACE_MODE_READ) ? AA_PTRACE_READ
: AA_PTRACE_TRACE);
aa_put_label(tracee);
__end_current_label_crit_section(tracer);
return error;
}
static int apparmor_ptrace_traceme(struct task_struct *parent)
{
struct aa_label *tracer, *tracee;
int error;
tracee = __begin_current_label_crit_section();
tracer = aa_get_task_label(parent);
error = aa_may_ptrace(tracer, tracee, AA_PTRACE_TRACE);
aa_put_label(tracer);
__end_current_label_crit_section(tracee);
return error;
}
/* Derived from security/commoncap.c:cap_capget */
static int apparmor_capget(const struct task_struct *target, kernel_cap_t *effective,
kernel_cap_t *inheritable, kernel_cap_t *permitted)
{
struct aa_label *label;
const struct cred *cred;
rcu_read_lock();
cred = __task_cred(target);
label = aa_get_newest_cred_label(cred);
/*
* cap_capget is stacked ahead of this and will
* initialize effective and permitted.
*/
if (!unconfined(label)) {
struct aa_profile *profile;
struct label_it i;
label_for_each_confined(i, label, profile) {
struct aa_ruleset *rules;
if (COMPLAIN_MODE(profile))
continue;
rules = list_first_entry(&profile->rules,
typeof(*rules), list);
*effective = cap_intersect(*effective,
rules->caps.allow);
*permitted = cap_intersect(*permitted,
rules->caps.allow);
}
}
rcu_read_unlock();
aa_put_label(label);
return 0;
}
static int apparmor_capable(const struct cred *cred, struct user_namespace *ns,
int cap, unsigned int opts)
{
struct aa_label *label;
int error = 0;
label = aa_get_newest_cred_label(cred);
if (!unconfined(label))
error = aa_capable(label, cap, opts);
aa_put_label(label);
return error;
}
/**
* common_perm - basic common permission check wrapper fn for paths
* @op: operation being checked
* @path: path to check permission of (NOT NULL)
* @mask: requested permissions mask
* @cond: conditional info for the permission request (NOT NULL)
*
* Returns: %0 else error code if error or permission denied
*/
static int common_perm(const char *op, const struct path *path, u32 mask,
struct path_cond *cond)
{
struct aa_label *label;
int error = 0;
label = __begin_current_label_crit_section();
if (!unconfined(label))
error = aa_path_perm(op, label, path, 0, mask, cond);
__end_current_label_crit_section(label);
return error;
}
/**
* common_perm_cond - common permission wrapper around inode cond
* @op: operation being checked
* @path: location to check (NOT NULL)
* @mask: requested permissions mask
*
* Returns: %0 else error code if error or permission denied
*/
static int common_perm_cond(const char *op, const struct path *path, u32 mask)
{
vfsuid_t vfsuid = i_uid_into_vfsuid(mnt_idmap(path->mnt),
d_backing_inode(path->dentry));
struct path_cond cond = {
vfsuid_into_kuid(vfsuid),
d_backing_inode(path->dentry)->i_mode
};
if (!path_mediated_fs(path->dentry))
return 0;
return common_perm(op, path, mask, &cond);
}
/**
* common_perm_dir_dentry - common permission wrapper when path is dir, dentry
* @op: operation being checked
* @dir: directory of the dentry (NOT NULL)
* @dentry: dentry to check (NOT NULL)
* @mask: requested permissions mask
* @cond: conditional info for the permission request (NOT NULL)
*
* Returns: %0 else error code if error or permission denied
*/
static int common_perm_dir_dentry(const char *op, const struct path *dir,
struct dentry *dentry, u32 mask,
struct path_cond *cond)
{
struct path path = { .mnt = dir->mnt, .dentry = dentry };
return common_perm(op, &path, mask, cond);
}
/**
* common_perm_rm - common permission wrapper for operations doing rm
* @op: operation being checked
* @dir: directory that the dentry is in (NOT NULL)
* @dentry: dentry being rm'd (NOT NULL)
* @mask: requested permission mask
*
* Returns: %0 else error code if error or permission denied
*/
static int common_perm_rm(const char *op, const struct path *dir,
struct dentry *dentry, u32 mask)
{
struct inode *inode = d_backing_inode(dentry);
struct path_cond cond = { };
vfsuid_t vfsuid;
if (!inode || !path_mediated_fs(dentry))
return 0;
vfsuid = i_uid_into_vfsuid(mnt_idmap(dir->mnt), inode);
cond.uid = vfsuid_into_kuid(vfsuid);
cond.mode = inode->i_mode;
return common_perm_dir_dentry(op, dir, dentry, mask, &cond);
}
/**
* common_perm_create - common permission wrapper for operations doing create
* @op: operation being checked
* @dir: directory that dentry will be created in (NOT NULL)
* @dentry: dentry to create (NOT NULL)
* @mask: request permission mask
* @mode: created file mode
*
* Returns: %0 else error code if error or permission denied
*/
static int common_perm_create(const char *op, const struct path *dir,
struct dentry *dentry, u32 mask, umode_t mode)
{
struct path_cond cond = { current_fsuid(), mode };
if (!path_mediated_fs(dir->dentry))
return 0;
return common_perm_dir_dentry(op, dir, dentry, mask, &cond);
}
static int apparmor_path_unlink(const struct path *dir, struct dentry *dentry)
{
return common_perm_rm(OP_UNLINK, dir, dentry, AA_MAY_DELETE);
}
static int apparmor_path_mkdir(const struct path *dir, struct dentry *dentry,
umode_t mode)
{
return common_perm_create(OP_MKDIR, dir, dentry, AA_MAY_CREATE,
S_IFDIR);
}
static int apparmor_path_rmdir(const struct path *dir, struct dentry *dentry)
{
return common_perm_rm(OP_RMDIR, dir, dentry, AA_MAY_DELETE);
}
static int apparmor_path_mknod(const struct path *dir, struct dentry *dentry,
umode_t mode, unsigned int dev)
{
return common_perm_create(OP_MKNOD, dir, dentry, AA_MAY_CREATE, mode);
}
static int apparmor_path_truncate(const struct path *path)
{
return common_perm_cond(OP_TRUNC, path, MAY_WRITE | AA_MAY_SETATTR);
}
static int apparmor_file_truncate(struct file *file)
{
return apparmor_path_truncate(&file->f_path);
}
static int apparmor_path_symlink(const struct path *dir, struct dentry *dentry,
const char *old_name)
{
return common_perm_create(OP_SYMLINK, dir, dentry, AA_MAY_CREATE,
S_IFLNK);
}
static int apparmor_path_link(struct dentry *old_dentry, const struct path *new_dir,
struct dentry *new_dentry)
{
struct aa_label *label;
int error = 0;
if (!path_mediated_fs(old_dentry))
return 0;
label = begin_current_label_crit_section();
if (!unconfined(label))
error = aa_path_link(label, old_dentry, new_dir, new_dentry);
end_current_label_crit_section(label);
return error;
}
static int apparmor_path_rename(const struct path *old_dir, struct dentry *old_dentry,
const struct path *new_dir, struct dentry *new_dentry,
const unsigned int flags)
{
struct aa_label *label;
int error = 0;
if (!path_mediated_fs(old_dentry))
return 0;
if ((flags & RENAME_EXCHANGE) && !path_mediated_fs(new_dentry))
return 0;
label = begin_current_label_crit_section();
if (!unconfined(label)) {
struct mnt_idmap *idmap = mnt_idmap(old_dir->mnt);
vfsuid_t vfsuid;
struct path old_path = { .mnt = old_dir->mnt,
.dentry = old_dentry };
struct path new_path = { .mnt = new_dir->mnt,
.dentry = new_dentry };
struct path_cond cond = {
.mode = d_backing_inode(old_dentry)->i_mode
};
vfsuid = i_uid_into_vfsuid(idmap, d_backing_inode(old_dentry));
cond.uid = vfsuid_into_kuid(vfsuid);
if (flags & RENAME_EXCHANGE) {
struct path_cond cond_exchange = {
.mode = d_backing_inode(new_dentry)->i_mode,
};
vfsuid = i_uid_into_vfsuid(idmap, d_backing_inode(old_dentry));
cond_exchange.uid = vfsuid_into_kuid(vfsuid);
error = aa_path_perm(OP_RENAME_SRC, label, &new_path, 0,
MAY_READ | AA_MAY_GETATTR | MAY_WRITE |
AA_MAY_SETATTR | AA_MAY_DELETE,
&cond_exchange);
if (!error)
error = aa_path_perm(OP_RENAME_DEST, label, &old_path,
0, MAY_WRITE | AA_MAY_SETATTR |
AA_MAY_CREATE, &cond_exchange);
}
if (!error)
error = aa_path_perm(OP_RENAME_SRC, label, &old_path, 0,
MAY_READ | AA_MAY_GETATTR | MAY_WRITE |
AA_MAY_SETATTR | AA_MAY_DELETE,
&cond);
if (!error)
error = aa_path_perm(OP_RENAME_DEST, label, &new_path,
0, MAY_WRITE | AA_MAY_SETATTR |
AA_MAY_CREATE, &cond);
}
end_current_label_crit_section(label);
return error;
}
static int apparmor_path_chmod(const struct path *path, umode_t mode)
{
return common_perm_cond(OP_CHMOD, path, AA_MAY_CHMOD);
}
static int apparmor_path_chown(const struct path *path, kuid_t uid, kgid_t gid)
{
return common_perm_cond(OP_CHOWN, path, AA_MAY_CHOWN);
}
static int apparmor_inode_getattr(const struct path *path)
{
return common_perm_cond(OP_GETATTR, path, AA_MAY_GETATTR);
}
static int apparmor_file_open(struct file *file)
{
struct aa_file_ctx *fctx = file_ctx(file);
struct aa_label *label;
int error = 0;
if (!path_mediated_fs(file->f_path.dentry))
return 0;
/* If in exec, permission is handled by bprm hooks.
* Cache permissions granted by the previous exec check, with
* implicit read and executable mmap which are required to
* actually execute the image.
*/
if (current->in_execve) {
fctx->allow = MAY_EXEC | MAY_READ | AA_EXEC_MMAP;
return 0;
}
label = aa_get_newest_cred_label(file->f_cred);
if (!unconfined(label)) {
struct mnt_idmap *idmap = file_mnt_idmap(file);
struct inode *inode = file_inode(file);
vfsuid_t vfsuid;
struct path_cond cond = {
.mode = inode->i_mode,
};
vfsuid = i_uid_into_vfsuid(idmap, inode);
cond.uid = vfsuid_into_kuid(vfsuid);
error = aa_path_perm(OP_OPEN, label, &file->f_path, 0,
aa_map_file_to_perms(file), &cond);
/* todo cache full allowed permissions set and state */
fctx->allow = aa_map_file_to_perms(file);
}
aa_put_label(label);
return error;
}
static int apparmor_file_alloc_security(struct file *file)
{
struct aa_file_ctx *ctx = file_ctx(file);
struct aa_label *label = begin_current_label_crit_section();
spin_lock_init(&ctx->lock);
rcu_assign_pointer(ctx->label, aa_get_label(label));
end_current_label_crit_section(label);
return 0;
}
static void apparmor_file_free_security(struct file *file)
{
struct aa_file_ctx *ctx = file_ctx(file);
if (ctx)
aa_put_label(rcu_access_pointer(ctx->label));
}
static int common_file_perm(const char *op, struct file *file, u32 mask,
bool in_atomic)
{
struct aa_label *label;
int error = 0;
/* don't reaudit files closed during inheritance */
if (file->f_path.dentry == aa_null.dentry)
return -EACCES;
label = __begin_current_label_crit_section();
error = aa_file_perm(op, label, file, mask, in_atomic);
__end_current_label_crit_section(label);
return error;
}
static int apparmor_file_receive(struct file *file)
{
return common_file_perm(OP_FRECEIVE, file, aa_map_file_to_perms(file),
false);
}
static int apparmor_file_permission(struct file *file, int mask)
{
return common_file_perm(OP_FPERM, file, mask, false);
}
static int apparmor_file_lock(struct file *file, unsigned int cmd)
{
u32 mask = AA_MAY_LOCK;
if (cmd == F_WRLCK)
mask |= MAY_WRITE;
return common_file_perm(OP_FLOCK, file, mask, false);
}
static int common_mmap(const char *op, struct file *file, unsigned long prot,
unsigned long flags, bool in_atomic)
{
int mask = 0;
if (!file || !file_ctx(file))
return 0;
if (prot & PROT_READ)
mask |= MAY_READ;
/*
* Private mappings don't require write perms since they don't
* write back to the files
*/
if ((prot & PROT_WRITE) && !(flags & MAP_PRIVATE))
mask |= MAY_WRITE;
if (prot & PROT_EXEC)
mask |= AA_EXEC_MMAP;
return common_file_perm(op, file, mask, in_atomic);
}
static int apparmor_mmap_file(struct file *file, unsigned long reqprot,
unsigned long prot, unsigned long flags)
{
return common_mmap(OP_FMMAP, file, prot, flags, true /* in_atomic */);
}
static int apparmor_file_mprotect(struct vm_area_struct *vma,
unsigned long reqprot, unsigned long prot)
{
return common_mmap(OP_FMPROT, vma->vm_file, prot,
!(vma->vm_flags & VM_SHARED) ? MAP_PRIVATE : 0,
false);
}
static int apparmor_sb_mount(const char *dev_name, const struct path *path,
const char *type, unsigned long flags, void *data)
{
struct aa_label *label;
int error = 0;
/* Discard magic */
if ((flags & MS_MGC_MSK) == MS_MGC_VAL)
flags &= ~MS_MGC_MSK;
flags &= ~AA_MS_IGNORE_MASK;
label = __begin_current_label_crit_section();
if (!unconfined(label)) {
if (flags & MS_REMOUNT)
error = aa_remount(label, path, flags, data);
else if (flags & MS_BIND)
error = aa_bind_mount(label, path, dev_name, flags);
else if (flags & (MS_SHARED | MS_PRIVATE | MS_SLAVE |
MS_UNBINDABLE))
error = aa_mount_change_type(label, path, flags);
else if (flags & MS_MOVE)
error = aa_move_mount(label, path, dev_name);
else
error = aa_new_mount(label, dev_name, path, type,
flags, data);
}
__end_current_label_crit_section(label);
return error;
}
static int apparmor_sb_umount(struct vfsmount *mnt, int flags)
{
struct aa_label *label;
int error = 0;
label = __begin_current_label_crit_section();
if (!unconfined(label))
error = aa_umount(label, mnt, flags);
__end_current_label_crit_section(label);
return error;
}
static int apparmor_sb_pivotroot(const struct path *old_path,
const struct path *new_path)
{
struct aa_label *label;
int error = 0;
label = aa_get_current_label();
if (!unconfined(label))
error = aa_pivotroot(label, old_path, new_path);
aa_put_label(label);
return error;
}
static int apparmor_getprocattr(struct task_struct *task, const char *name,
char **value)
{
int error = -ENOENT;
/* released below */
const struct cred *cred = get_task_cred(task);
struct aa_task_ctx *ctx = task_ctx(current);
struct aa_label *label = NULL;
if (strcmp(name, "current") == 0)
label = aa_get_newest_label(cred_label(cred));
else if (strcmp(name, "prev") == 0 && ctx->previous)
label = aa_get_newest_label(ctx->previous);
else if (strcmp(name, "exec") == 0 && ctx->onexec)
label = aa_get_newest_label(ctx->onexec);
else
error = -EINVAL;
if (label)
error = aa_getprocattr(label, value);
aa_put_label(label);
put_cred(cred);
return error;
}
static int apparmor_setprocattr(const char *name, void *value,
size_t size)
{
char *command, *largs = NULL, *args = value;
size_t arg_size;
int error;
DEFINE_AUDIT_DATA(sa, LSM_AUDIT_DATA_NONE, AA_CLASS_NONE,
OP_SETPROCATTR);
if (size == 0)
return -EINVAL;
/* AppArmor requires that the buffer must be null terminated atm */
if (args[size - 1] != '\0') {
/* null terminate */
largs = args = kmalloc(size + 1, GFP_KERNEL);
if (!args)
return -ENOMEM;
memcpy(args, value, size);
args[size] = '\0';
}
error = -EINVAL;
args = strim(args);
command = strsep(&args, " ");
if (!args)
goto out;
args = skip_spaces(args);
if (!*args)
goto out;
arg_size = size - (args - (largs ? largs : (char *) value));
if (strcmp(name, "current") == 0) {
if (strcmp(command, "changehat") == 0) {
error = aa_setprocattr_changehat(args, arg_size,
AA_CHANGE_NOFLAGS);
} else if (strcmp(command, "permhat") == 0) {
error = aa_setprocattr_changehat(args, arg_size,
AA_CHANGE_TEST);
} else if (strcmp(command, "changeprofile") == 0) {
error = aa_change_profile(args, AA_CHANGE_NOFLAGS);
} else if (strcmp(command, "permprofile") == 0) {
error = aa_change_profile(args, AA_CHANGE_TEST);
} else if (strcmp(command, "stack") == 0) {
error = aa_change_profile(args, AA_CHANGE_STACK);
} else
goto fail;
} else if (strcmp(name, "exec") == 0) {
if (strcmp(command, "exec") == 0)
error = aa_change_profile(args, AA_CHANGE_ONEXEC);
else if (strcmp(command, "stack") == 0)
error = aa_change_profile(args, (AA_CHANGE_ONEXEC |
AA_CHANGE_STACK));
else
goto fail;
} else
/* only support the "current" and "exec" process attributes */
goto fail;
if (!error)
error = size;
out:
kfree(largs);
return error;
fail:
aad(&sa)->label = begin_current_label_crit_section();
aad(&sa)->info = name;
aad(&sa)->error = error = -EINVAL;
aa_audit_msg(AUDIT_APPARMOR_DENIED, &sa, NULL);
end_current_label_crit_section(aad(&sa)->label);
goto out;
}
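/*
 * Illustrative userspace sketch (under #if 0, not built here):
 * apparmor_getprocattr()/apparmor_setprocattr() above back the
 * /proc/<pid>/attr/ interface.  The sketch prints the caller's current
 * confinement and then issues the raw "changeprofile" command parsed above.
 * PROFILE is a hypothetical profile name; the transition only succeeds if
 * loaded policy permits it, and real programs normally go through
 * libapparmor's aa_change_profile() rather than writing the file directly.
 */
#if 0
#include <stdio.h>

#define PROFILE "some-confined-tool"	/* placeholder profile name */

int main(void)
{
	char label[256];
	FILE *f = fopen("/proc/self/attr/current", "r");

	if (f) {
		if (fgets(label, sizeof(label), f))
			printf("current label: %s", label);
		fclose(f);
	}

	f = fopen("/proc/self/attr/current", "w");
	if (!f) {
		perror("attr/current");
		return 1;
	}
	fprintf(f, "changeprofile " PROFILE);
	if (fclose(f) != 0) {		/* the write is checked at flush time */
		perror("changeprofile");
		return 1;
	}
	return 0;
}
#endif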
/**
* apparmor_bprm_committing_creds - do task cleanup on committing new creds
* @bprm: binprm for the exec (NOT NULL)
*/
static void apparmor_bprm_committing_creds(struct linux_binprm *bprm)
{
struct aa_label *label = aa_current_raw_label();
struct aa_label *new_label = cred_label(bprm->cred);
/* bail out if unconfined or not changing profile */
if ((new_label->proxy == label->proxy) ||
(unconfined(new_label)))
return;
aa_inherit_files(bprm->cred, current->files);
current->pdeath_signal = 0;
/* reset soft limits and set hard limits for the new label */
__aa_transition_rlimits(label, new_label);
}
/**
* apparmor_bprm_committed_creds() - do cleanup after new creds committed
* @bprm: binprm for the exec (NOT NULL)
*/
static void apparmor_bprm_committed_creds(struct linux_binprm *bprm)
{
/* clear out temporary/transitional state from the context */
aa_clear_task_ctx_trans(task_ctx(current));
}
static void apparmor_current_getsecid_subj(u32 *secid)
{
struct aa_label *label = aa_get_current_label();
*secid = label->secid;
aa_put_label(label);
}
static void apparmor_task_getsecid_obj(struct task_struct *p, u32 *secid)
{
struct aa_label *label = aa_get_task_label(p);
*secid = label->secid;
aa_put_label(label);
}
static int apparmor_task_setrlimit(struct task_struct *task,
unsigned int resource, struct rlimit *new_rlim)
{
struct aa_label *label = __begin_current_label_crit_section();
int error = 0;
if (!unconfined(label))
error = aa_task_setrlimit(label, task, resource, new_rlim);
__end_current_label_crit_section(label);
return error;
}
static int apparmor_task_kill(struct task_struct *target, struct kernel_siginfo *info,
int sig, const struct cred *cred)
{
struct aa_label *cl, *tl;
int error;
if (cred) {
/*
* Dealing with USB IO specific behavior
*/
cl = aa_get_newest_cred_label(cred);
tl = aa_get_task_label(target);
error = aa_may_signal(cl, tl, sig);
aa_put_label(cl);
aa_put_label(tl);
return error;
}
cl = __begin_current_label_crit_section();
tl = aa_get_task_label(target);
error = aa_may_signal(cl, tl, sig);
aa_put_label(tl);
__end_current_label_crit_section(cl);
return error;
}
/**
* apparmor_sk_alloc_security - allocate and attach the sk_security field
*/
static int apparmor_sk_alloc_security(struct sock *sk, int family, gfp_t flags)
{
struct aa_sk_ctx *ctx;
ctx = kzalloc(sizeof(*ctx), flags);
if (!ctx)
return -ENOMEM;
SK_CTX(sk) = ctx;
return 0;
}
/**
* apparmor_sk_free_security - free the sk_security field
*/
static void apparmor_sk_free_security(struct sock *sk)
{
struct aa_sk_ctx *ctx = SK_CTX(sk);
SK_CTX(sk) = NULL;
aa_put_label(ctx->label);
aa_put_label(ctx->peer);
kfree(ctx);
}
/**
* apparmor_sk_clone_security - clone the sk_security field
*/
static void apparmor_sk_clone_security(const struct sock *sk,
struct sock *newsk)
{
struct aa_sk_ctx *ctx = SK_CTX(sk);
struct aa_sk_ctx *new = SK_CTX(newsk);
if (new->label)
aa_put_label(new->label);
new->label = aa_get_label(ctx->label);
if (new->peer)
aa_put_label(new->peer);
new->peer = aa_get_label(ctx->peer);
}
/**
* apparmor_socket_create - check perms before creating a new socket
*/
static int apparmor_socket_create(int family, int type, int protocol, int kern)
{
struct aa_label *label;
int error = 0;
AA_BUG(in_interrupt());
label = begin_current_label_crit_section();
if (!(kern || unconfined(label)))
error = af_select(family,
create_perm(label, family, type, protocol),
aa_af_perm(label, OP_CREATE, AA_MAY_CREATE,
family, type, protocol));
end_current_label_crit_section(label);
return error;
}
/**
* apparmor_socket_post_create - setup the per-socket security struct
*
* Note:
* - kernel sockets currently labeled unconfined but we may want to
* move to a special kernel label
* - socket may not have sk here if created with sock_create_lite or
* sock_alloc. These should be accept cases which will be handled in
* sock_graft.
*/
static int apparmor_socket_post_create(struct socket *sock, int family,
int type, int protocol, int kern)
{
struct aa_label *label;
if (kern) {
label = aa_get_label(kernel_t);
} else
label = aa_get_current_label();
if (sock->sk) {
struct aa_sk_ctx *ctx = SK_CTX(sock->sk);
aa_put_label(ctx->label);
ctx->label = aa_get_label(label);
}
aa_put_label(label);
return 0;
}
/**
* apparmor_socket_bind - check perms before bind addr to socket
*/
static int apparmor_socket_bind(struct socket *sock,
struct sockaddr *address, int addrlen)
{
AA_BUG(!sock);
AA_BUG(!sock->sk);
AA_BUG(!address);
AA_BUG(in_interrupt());
return af_select(sock->sk->sk_family,
bind_perm(sock, address, addrlen),
aa_sk_perm(OP_BIND, AA_MAY_BIND, sock->sk));
}
/**
* apparmor_socket_connect - check perms before connecting @sock to @address
*/
static int apparmor_socket_connect(struct socket *sock,
struct sockaddr *address, int addrlen)
{
AA_BUG(!sock);
AA_BUG(!sock->sk);
AA_BUG(!address);
AA_BUG(in_interrupt());
return af_select(sock->sk->sk_family,
connect_perm(sock, address, addrlen),
aa_sk_perm(OP_CONNECT, AA_MAY_CONNECT, sock->sk));
}
/**
* apparmor_socket_listen - check perms before allowing listen
*/
static int apparmor_socket_listen(struct socket *sock, int backlog)
{
AA_BUG(!sock);
AA_BUG(!sock->sk);
AA_BUG(in_interrupt());
return af_select(sock->sk->sk_family,
listen_perm(sock, backlog),
aa_sk_perm(OP_LISTEN, AA_MAY_LISTEN, sock->sk));
}
/**
* apparmor_socket_accept - check perms before accepting a new connection.
*
* Note: while @newsock is created and has some information, the accept
* has not been done.
*/
static int apparmor_socket_accept(struct socket *sock, struct socket *newsock)
{
AA_BUG(!sock);
AA_BUG(!sock->sk);
AA_BUG(!newsock);
AA_BUG(in_interrupt());
return af_select(sock->sk->sk_family,
accept_perm(sock, newsock),
aa_sk_perm(OP_ACCEPT, AA_MAY_ACCEPT, sock->sk));
}
static int aa_sock_msg_perm(const char *op, u32 request, struct socket *sock,
struct msghdr *msg, int size)
{
AA_BUG(!sock);
AA_BUG(!sock->sk);
AA_BUG(!msg);
AA_BUG(in_interrupt());
return af_select(sock->sk->sk_family,
msg_perm(op, request, sock, msg, size),
aa_sk_perm(op, request, sock->sk));
}
/**
* apparmor_socket_sendmsg - check perms before sending msg to another socket
*/
static int apparmor_socket_sendmsg(struct socket *sock,
struct msghdr *msg, int size)
{
return aa_sock_msg_perm(OP_SENDMSG, AA_MAY_SEND, sock, msg, size);
}
/**
* apparmor_socket_recvmsg - check perms before receiving a message
*/
static int apparmor_socket_recvmsg(struct socket *sock,
struct msghdr *msg, int size, int flags)
{
return aa_sock_msg_perm(OP_RECVMSG, AA_MAY_RECEIVE, sock, msg, size);
}
/* revalidation, get/set attr, shutdown */
static int aa_sock_perm(const char *op, u32 request, struct socket *sock)
{
AA_BUG(!sock);
AA_BUG(!sock->sk);
AA_BUG(in_interrupt());
return af_select(sock->sk->sk_family,
sock_perm(op, request, sock),
aa_sk_perm(op, request, sock->sk));
}
/**
* apparmor_socket_getsockname - check perms before getting the local address
*/
static int apparmor_socket_getsockname(struct socket *sock)
{
return aa_sock_perm(OP_GETSOCKNAME, AA_MAY_GETATTR, sock);
}
/**
* apparmor_socket_getpeername - check perms before getting remote address
*/
static int apparmor_socket_getpeername(struct socket *sock)
{
return aa_sock_perm(OP_GETPEERNAME, AA_MAY_GETATTR, sock);
}
/* revalidation, get/set attr, opt */
static int aa_sock_opt_perm(const char *op, u32 request, struct socket *sock,
int level, int optname)
{
AA_BUG(!sock);
AA_BUG(!sock->sk);
AA_BUG(in_interrupt());
return af_select(sock->sk->sk_family,
opt_perm(op, request, sock, level, optname),
aa_sk_perm(op, request, sock->sk));
}
/**
* apparmor_socket_getsockopt - check perms before getting socket options
*/
static int apparmor_socket_getsockopt(struct socket *sock, int level,
int optname)
{
return aa_sock_opt_perm(OP_GETSOCKOPT, AA_MAY_GETOPT, sock,
level, optname);
}
/**
* apparmor_socket_setsockopt - check perms before setting socket options
*/
static int apparmor_socket_setsockopt(struct socket *sock, int level,
int optname)
{
return aa_sock_opt_perm(OP_SETSOCKOPT, AA_MAY_SETOPT, sock,
level, optname);
}
/**
* apparmor_socket_shutdown - check perms before shutting down @sock conn
*/
static int apparmor_socket_shutdown(struct socket *sock, int how)
{
return aa_sock_perm(OP_SHUTDOWN, AA_MAY_SHUTDOWN, sock);
}
#ifdef CONFIG_NETWORK_SECMARK
/**
* apparmor_socket_sock_rcv_skb - check perms before associating skb to sk
*
* Note: cannot sleep; may be called with locks held
*
* We don't want protocol-specific code in __skb_recv_datagram() to deny an
* incoming connection, so the secmark check is done here in
* socket_sock_rcv_skb() instead.
*/
static int apparmor_socket_sock_rcv_skb(struct sock *sk, struct sk_buff *skb)
{
struct aa_sk_ctx *ctx = SK_CTX(sk);
if (!skb->secmark)
return 0;
return apparmor_secmark_check(ctx->label, OP_RECVMSG, AA_MAY_RECEIVE,
skb->secmark, sk);
}
#endif
static struct aa_label *sk_peer_label(struct sock *sk)
{
struct aa_sk_ctx *ctx = SK_CTX(sk);
if (ctx->peer)
return ctx->peer;
return ERR_PTR(-ENOPROTOOPT);
}
/**
* apparmor_socket_getpeersec_stream - get security context of peer
*
* Note: for tcp only valid if using ipsec or cipso on lan
*/
static int apparmor_socket_getpeersec_stream(struct socket *sock,
sockptr_t optval, sockptr_t optlen,
unsigned int len)
{
char *name = NULL;
int slen, error = 0;
struct aa_label *label;
struct aa_label *peer;
label = begin_current_label_crit_section();
peer = sk_peer_label(sock->sk);
if (IS_ERR(peer)) {
error = PTR_ERR(peer);
goto done;
}
slen = aa_label_asxprint(&name, labels_ns(label), peer,
FLAG_SHOW_MODE | FLAG_VIEW_SUBNS |
FLAG_HIDDEN_UNCONFINED, GFP_KERNEL);
/* don't include terminating \0 in slen, it breaks some apps */
if (slen < 0) {
error = -ENOMEM;
goto done;
}
if (slen > len) {
error = -ERANGE;
goto done_len;
}
if (copy_to_sockptr(optval, name, slen))
error = -EFAULT;
done_len:
if (copy_to_sockptr(optlen, &slen, sizeof(slen)))
error = -EFAULT;
done:
end_current_label_crit_section(label);
kfree(name);
return error;
}
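/*
 * Illustrative userspace sketch (under #if 0, not built here):
 * apparmor_socket_getpeersec_stream() above services
 * getsockopt(SOL_SOCKET, SO_PEERSEC).  The sketch asks for the peer label on
 * one end of an AF_UNIX socketpair; it may legitimately fail with
 * ENOPROTOOPT when the kernel recorded no peer label (see sk_peer_label()).
 * The SO_PEERSEC fallback value matches asm-generic/socket.h.
 */
#if 0
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>

#ifndef SO_PEERSEC
#define SO_PEERSEC 31
#endif

int main(void)
{
	int sv[2];
	char label[256];
	socklen_t len = sizeof(label);

	if (socketpair(AF_UNIX, SOCK_STREAM, 0, sv)) {
		perror("socketpair");
		return 1;
	}
	memset(label, 0, sizeof(label));
	if (getsockopt(sv[0], SOL_SOCKET, SO_PEERSEC, label, &len)) {
		perror("SO_PEERSEC");
		return 1;
	}
	printf("peer label: %.*s\n", (int)len, label);
	return 0;
}
#endif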
/**
* apparmor_socket_getpeersec_dgram - get security label of packet
* @sock: the peer socket
* @skb: packet data
* @secid: pointer to where to put the secid of the packet
*
* Sets the netlabel socket state on sk from parent
*/
static int apparmor_socket_getpeersec_dgram(struct socket *sock,
struct sk_buff *skb, u32 *secid)
{
/* TODO: requires secid support */
return -ENOPROTOOPT;
}
/**
* apparmor_sock_graft - Initialize newly created socket
* @sk: child sock
* @parent: parent socket
*
* Note: could set off of SOCK_CTX(parent) but need to track inode and we can
* just set sk security information off of current creating process label
* Labeling of sk for accept case - probably should be sock based
* instead of task, because of the case where an implicitly labeled
* socket is shared by different tasks.
*/
static void apparmor_sock_graft(struct sock *sk, struct socket *parent)
{
struct aa_sk_ctx *ctx = SK_CTX(sk);
if (!ctx->label)
ctx->label = aa_get_current_label();
}
#ifdef CONFIG_NETWORK_SECMARK
static int apparmor_inet_conn_request(const struct sock *sk, struct sk_buff *skb,
struct request_sock *req)
{
struct aa_sk_ctx *ctx = SK_CTX(sk);
if (!skb->secmark)
return 0;
return apparmor_secmark_check(ctx->label, OP_CONNECT, AA_MAY_CONNECT,
skb->secmark, sk);
}
#endif
/*
* The cred blob is a pointer to, not an instance of, an aa_label.
*/
struct lsm_blob_sizes apparmor_blob_sizes __ro_after_init = {
.lbs_cred = sizeof(struct aa_label *),
.lbs_file = sizeof(struct aa_file_ctx),
.lbs_task = sizeof(struct aa_task_ctx),
};
static struct security_hook_list apparmor_hooks[] __ro_after_init = {
LSM_HOOK_INIT(ptrace_access_check, apparmor_ptrace_access_check),
LSM_HOOK_INIT(ptrace_traceme, apparmor_ptrace_traceme),
LSM_HOOK_INIT(capget, apparmor_capget),
LSM_HOOK_INIT(capable, apparmor_capable),
LSM_HOOK_INIT(sb_mount, apparmor_sb_mount),
LSM_HOOK_INIT(sb_umount, apparmor_sb_umount),
LSM_HOOK_INIT(sb_pivotroot, apparmor_sb_pivotroot),
LSM_HOOK_INIT(path_link, apparmor_path_link),
LSM_HOOK_INIT(path_unlink, apparmor_path_unlink),
LSM_HOOK_INIT(path_symlink, apparmor_path_symlink),
LSM_HOOK_INIT(path_mkdir, apparmor_path_mkdir),
LSM_HOOK_INIT(path_rmdir, apparmor_path_rmdir),
LSM_HOOK_INIT(path_mknod, apparmor_path_mknod),
LSM_HOOK_INIT(path_rename, apparmor_path_rename),
LSM_HOOK_INIT(path_chmod, apparmor_path_chmod),
LSM_HOOK_INIT(path_chown, apparmor_path_chown),
LSM_HOOK_INIT(path_truncate, apparmor_path_truncate),
LSM_HOOK_INIT(inode_getattr, apparmor_inode_getattr),
LSM_HOOK_INIT(file_open, apparmor_file_open),
LSM_HOOK_INIT(file_receive, apparmor_file_receive),
LSM_HOOK_INIT(file_permission, apparmor_file_permission),
LSM_HOOK_INIT(file_alloc_security, apparmor_file_alloc_security),
LSM_HOOK_INIT(file_free_security, apparmor_file_free_security),
LSM_HOOK_INIT(mmap_file, apparmor_mmap_file),
LSM_HOOK_INIT(file_mprotect, apparmor_file_mprotect),
LSM_HOOK_INIT(file_lock, apparmor_file_lock),
LSM_HOOK_INIT(file_truncate, apparmor_file_truncate),
LSM_HOOK_INIT(getprocattr, apparmor_getprocattr),
LSM_HOOK_INIT(setprocattr, apparmor_setprocattr),
LSM_HOOK_INIT(sk_alloc_security, apparmor_sk_alloc_security),
LSM_HOOK_INIT(sk_free_security, apparmor_sk_free_security),
LSM_HOOK_INIT(sk_clone_security, apparmor_sk_clone_security),
LSM_HOOK_INIT(socket_create, apparmor_socket_create),
LSM_HOOK_INIT(socket_post_create, apparmor_socket_post_create),
LSM_HOOK_INIT(socket_bind, apparmor_socket_bind),
LSM_HOOK_INIT(socket_connect, apparmor_socket_connect),
LSM_HOOK_INIT(socket_listen, apparmor_socket_listen),
LSM_HOOK_INIT(socket_accept, apparmor_socket_accept),
LSM_HOOK_INIT(socket_sendmsg, apparmor_socket_sendmsg),
LSM_HOOK_INIT(socket_recvmsg, apparmor_socket_recvmsg),
LSM_HOOK_INIT(socket_getsockname, apparmor_socket_getsockname),
LSM_HOOK_INIT(socket_getpeername, apparmor_socket_getpeername),
LSM_HOOK_INIT(socket_getsockopt, apparmor_socket_getsockopt),
LSM_HOOK_INIT(socket_setsockopt, apparmor_socket_setsockopt),
LSM_HOOK_INIT(socket_shutdown, apparmor_socket_shutdown),
#ifdef CONFIG_NETWORK_SECMARK
LSM_HOOK_INIT(socket_sock_rcv_skb, apparmor_socket_sock_rcv_skb),
#endif
LSM_HOOK_INIT(socket_getpeersec_stream,
apparmor_socket_getpeersec_stream),
LSM_HOOK_INIT(socket_getpeersec_dgram,
apparmor_socket_getpeersec_dgram),
LSM_HOOK_INIT(sock_graft, apparmor_sock_graft),
#ifdef CONFIG_NETWORK_SECMARK
LSM_HOOK_INIT(inet_conn_request, apparmor_inet_conn_request),
#endif
LSM_HOOK_INIT(cred_alloc_blank, apparmor_cred_alloc_blank),
LSM_HOOK_INIT(cred_free, apparmor_cred_free),
LSM_HOOK_INIT(cred_prepare, apparmor_cred_prepare),
LSM_HOOK_INIT(cred_transfer, apparmor_cred_transfer),
LSM_HOOK_INIT(bprm_creds_for_exec, apparmor_bprm_creds_for_exec),
LSM_HOOK_INIT(bprm_committing_creds, apparmor_bprm_committing_creds),
LSM_HOOK_INIT(bprm_committed_creds, apparmor_bprm_committed_creds),
LSM_HOOK_INIT(task_free, apparmor_task_free),
LSM_HOOK_INIT(task_alloc, apparmor_task_alloc),
LSM_HOOK_INIT(current_getsecid_subj, apparmor_current_getsecid_subj),
LSM_HOOK_INIT(task_getsecid_obj, apparmor_task_getsecid_obj),
LSM_HOOK_INIT(task_setrlimit, apparmor_task_setrlimit),
LSM_HOOK_INIT(task_kill, apparmor_task_kill),
#ifdef CONFIG_AUDIT
LSM_HOOK_INIT(audit_rule_init, aa_audit_rule_init),
LSM_HOOK_INIT(audit_rule_known, aa_audit_rule_known),
LSM_HOOK_INIT(audit_rule_match, aa_audit_rule_match),
LSM_HOOK_INIT(audit_rule_free, aa_audit_rule_free),
#endif
LSM_HOOK_INIT(secid_to_secctx, apparmor_secid_to_secctx),
LSM_HOOK_INIT(secctx_to_secid, apparmor_secctx_to_secid),
LSM_HOOK_INIT(release_secctx, apparmor_release_secctx),
};
/*
* AppArmor sysfs module parameters
*/
static int param_set_aabool(const char *val, const struct kernel_param *kp);
static int param_get_aabool(char *buffer, const struct kernel_param *kp);
#define param_check_aabool param_check_bool
static const struct kernel_param_ops param_ops_aabool = {
.flags = KERNEL_PARAM_OPS_FL_NOARG,
.set = param_set_aabool,
.get = param_get_aabool
};
static int param_set_aauint(const char *val, const struct kernel_param *kp);
static int param_get_aauint(char *buffer, const struct kernel_param *kp);
#define param_check_aauint param_check_uint
static const struct kernel_param_ops param_ops_aauint = {
.set = param_set_aauint,
.get = param_get_aauint
};
static int param_set_aacompressionlevel(const char *val,
const struct kernel_param *kp);
static int param_get_aacompressionlevel(char *buffer,
const struct kernel_param *kp);
#define param_check_aacompressionlevel param_check_int
static const struct kernel_param_ops param_ops_aacompressionlevel = {
.set = param_set_aacompressionlevel,
.get = param_get_aacompressionlevel
};
static int param_set_aalockpolicy(const char *val, const struct kernel_param *kp);
static int param_get_aalockpolicy(char *buffer, const struct kernel_param *kp);
#define param_check_aalockpolicy param_check_bool
static const struct kernel_param_ops param_ops_aalockpolicy = {
.flags = KERNEL_PARAM_OPS_FL_NOARG,
.set = param_set_aalockpolicy,
.get = param_get_aalockpolicy
};
static int param_set_audit(const char *val, const struct kernel_param *kp);
static int param_get_audit(char *buffer, const struct kernel_param *kp);
static int param_set_mode(const char *val, const struct kernel_param *kp);
static int param_get_mode(char *buffer, const struct kernel_param *kp);
/* Flag values, also controllable via /sys/module/apparmor/parameters
* We define special types as we want to do additional mediation.
*/
/* AppArmor global enforcement switch - complain, enforce, kill */
enum profile_mode aa_g_profile_mode = APPARMOR_ENFORCE;
module_param_call(mode, param_set_mode, param_get_mode,
&aa_g_profile_mode, S_IRUSR | S_IWUSR);
/* whether policy verification hashing is enabled */
bool aa_g_hash_policy = IS_ENABLED(CONFIG_SECURITY_APPARMOR_HASH_DEFAULT);
#ifdef CONFIG_SECURITY_APPARMOR_HASH
module_param_named(hash_policy, aa_g_hash_policy, aabool, S_IRUSR | S_IWUSR);
#endif
/* whether policy exactly as loaded is retained for debug and checkpointing */
bool aa_g_export_binary = IS_ENABLED(CONFIG_SECURITY_APPARMOR_EXPORT_BINARY);
#ifdef CONFIG_SECURITY_APPARMOR_EXPORT_BINARY
module_param_named(export_binary, aa_g_export_binary, aabool, 0600);
#endif
/* policy loaddata compression level */
int aa_g_rawdata_compression_level = AA_DEFAULT_CLEVEL;
module_param_named(rawdata_compression_level, aa_g_rawdata_compression_level,
aacompressionlevel, 0400);
/* Debug mode */
bool aa_g_debug = IS_ENABLED(CONFIG_SECURITY_APPARMOR_DEBUG_MESSAGES);
module_param_named(debug, aa_g_debug, aabool, S_IRUSR | S_IWUSR);
/* Audit mode */
enum audit_mode aa_g_audit;
module_param_call(audit, param_set_audit, param_get_audit,
&aa_g_audit, S_IRUSR | S_IWUSR);
/* Determines if audit header is included in audited messages. This
* provides more context if the audit daemon is not running
*/
bool aa_g_audit_header = true;
module_param_named(audit_header, aa_g_audit_header, aabool,
S_IRUSR | S_IWUSR);
/* lock out loading/removal of policy
 * TODO: add boot-time loading of policy, which becomes the only way to
 *       load policy once lock_policy is set
*/
bool aa_g_lock_policy;
module_param_named(lock_policy, aa_g_lock_policy, aalockpolicy,
S_IRUSR | S_IWUSR);
/* Syscall logging mode */
bool aa_g_logsyscall;
module_param_named(logsyscall, aa_g_logsyscall, aabool, S_IRUSR | S_IWUSR);
/* Maximum pathname length before accesses will start getting rejected */
unsigned int aa_g_path_max = 2 * PATH_MAX;
module_param_named(path_max, aa_g_path_max, aauint, S_IRUSR);
/* Determines how paranoid loading of policy is and how much verification
* on the loaded policy is done.
 * DEPRECATED: read only, as strict checking of the load is always done now
 * that non-root users (user namespaces) can load policy.
*/
bool aa_g_paranoid_load = IS_ENABLED(CONFIG_SECURITY_APPARMOR_PARANOID_LOAD);
module_param_named(paranoid_load, aa_g_paranoid_load, aabool, S_IRUGO);
static int param_get_aaintbool(char *buffer, const struct kernel_param *kp);
static int param_set_aaintbool(const char *val, const struct kernel_param *kp);
#define param_check_aaintbool param_check_int
static const struct kernel_param_ops param_ops_aaintbool = {
.set = param_set_aaintbool,
.get = param_get_aaintbool
};
/* Boot time disable flag */
static int apparmor_enabled __ro_after_init = 1;
module_param_named(enabled, apparmor_enabled, aaintbool, 0444);
static int __init apparmor_enabled_setup(char *str)
{
unsigned long enabled;
int error = kstrtoul(str, 0, &enabled);
if (!error)
apparmor_enabled = enabled ? 1 : 0;
return 1;
}
__setup("apparmor=", apparmor_enabled_setup);
/* set global flag turning off the ability to load policy */
static int param_set_aalockpolicy(const char *val, const struct kernel_param *kp)
{
if (!apparmor_enabled)
return -EINVAL;
if (apparmor_initialized && !aa_current_policy_admin_capable(NULL))
return -EPERM;
return param_set_bool(val, kp);
}
static int param_get_aalockpolicy(char *buffer, const struct kernel_param *kp)
{
if (!apparmor_enabled)
return -EINVAL;
if (apparmor_initialized && !aa_current_policy_view_capable(NULL))
return -EPERM;
return param_get_bool(buffer, kp);
}
static int param_set_aabool(const char *val, const struct kernel_param *kp)
{
if (!apparmor_enabled)
return -EINVAL;
if (apparmor_initialized && !aa_current_policy_admin_capable(NULL))
return -EPERM;
return param_set_bool(val, kp);
}
static int param_get_aabool(char *buffer, const struct kernel_param *kp)
{
if (!apparmor_enabled)
return -EINVAL;
if (apparmor_initialized && !aa_current_policy_view_capable(NULL))
return -EPERM;
return param_get_bool(buffer, kp);
}
static int param_set_aauint(const char *val, const struct kernel_param *kp)
{
int error;
if (!apparmor_enabled)
return -EINVAL;
/* file is read-only, but enforce a second line of checking here */
if (apparmor_initialized)
return -EPERM;
error = param_set_uint(val, kp);
aa_g_path_max = max_t(uint32_t, aa_g_path_max, sizeof(union aa_buffer));
pr_info("AppArmor: buffer size set to %d bytes\n", aa_g_path_max);
return error;
}
static int param_get_aauint(char *buffer, const struct kernel_param *kp)
{
if (!apparmor_enabled)
return -EINVAL;
if (apparmor_initialized && !aa_current_policy_view_capable(NULL))
return -EPERM;
return param_get_uint(buffer, kp);
}
/* Can only be set before AppArmor is initialized (i.e. on boot cmdline). */
static int param_set_aaintbool(const char *val, const struct kernel_param *kp)
{
struct kernel_param kp_local;
bool value;
int error;
if (apparmor_initialized)
return -EPERM;
/* Create local copy, with arg pointing to bool type. */
value = !!*((int *)kp->arg);
memcpy(&kp_local, kp, sizeof(kp_local));
kp_local.arg = &value;
error = param_set_bool(val, &kp_local);
if (!error)
*((int *)kp->arg) = *((bool *)kp_local.arg);
return error;
}
/*
* To avoid changing /sys/module/apparmor/parameters/enabled from Y/N to
* 1/0, this converts the "int that is actually bool" back to bool for
* display in the /sys filesystem, while keeping it "int" for the LSM
* infrastructure.
*/
static int param_get_aaintbool(char *buffer, const struct kernel_param *kp)
{
struct kernel_param kp_local;
bool value;
/* Create local copy, with arg pointing to bool type. */
value = !!*((int *)kp->arg);
memcpy(&kp_local, kp, sizeof(kp_local));
kp_local.arg = &value;
return param_get_bool(buffer, &kp_local);
}
static int param_set_aacompressionlevel(const char *val,
const struct kernel_param *kp)
{
int error;
if (!apparmor_enabled)
return -EINVAL;
if (apparmor_initialized)
return -EPERM;
error = param_set_int(val, kp);
aa_g_rawdata_compression_level = clamp(aa_g_rawdata_compression_level,
AA_MIN_CLEVEL, AA_MAX_CLEVEL);
pr_info("AppArmor: policy rawdata compression level set to %d\n",
aa_g_rawdata_compression_level);
return error;
}
static int param_get_aacompressionlevel(char *buffer,
const struct kernel_param *kp)
{
if (!apparmor_enabled)
return -EINVAL;
if (apparmor_initialized && !aa_current_policy_view_capable(NULL))
return -EPERM;
return param_get_int(buffer, kp);
}
static int param_get_audit(char *buffer, const struct kernel_param *kp)
{
if (!apparmor_enabled)
return -EINVAL;
if (apparmor_initialized && !aa_current_policy_view_capable(NULL))
return -EPERM;
return sprintf(buffer, "%s", audit_mode_names[aa_g_audit]);
}
static int param_set_audit(const char *val, const struct kernel_param *kp)
{
int i;
if (!apparmor_enabled)
return -EINVAL;
if (!val)
return -EINVAL;
if (apparmor_initialized && !aa_current_policy_admin_capable(NULL))
return -EPERM;
i = match_string(audit_mode_names, AUDIT_MAX_INDEX, val);
if (i < 0)
return -EINVAL;
aa_g_audit = i;
return 0;
}
static int param_get_mode(char *buffer, const struct kernel_param *kp)
{
if (!apparmor_enabled)
return -EINVAL;
if (apparmor_initialized && !aa_current_policy_view_capable(NULL))
return -EPERM;
return sprintf(buffer, "%s", aa_profile_mode_names[aa_g_profile_mode]);
}
static int param_set_mode(const char *val, const struct kernel_param *kp)
{
int i;
if (!apparmor_enabled)
return -EINVAL;
if (!val)
return -EINVAL;
if (apparmor_initialized && !aa_current_policy_admin_capable(NULL))
return -EPERM;
i = match_string(aa_profile_mode_names, APPARMOR_MODE_NAMES_MAX_INDEX,
val);
if (i < 0)
return -EINVAL;
aa_g_profile_mode = i;
return 0;
}
char *aa_get_buffer(bool in_atomic)
{
union aa_buffer *aa_buf;
bool try_again = true;
gfp_t flags = (GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_NOWARN);
retry:
spin_lock(&aa_buffers_lock);
if (buffer_count > reserve_count ||
(in_atomic && !list_empty(&aa_global_buffers))) {
aa_buf = list_first_entry(&aa_global_buffers, union aa_buffer,
list);
list_del(&aa_buf->list);
buffer_count--;
spin_unlock(&aa_buffers_lock);
return aa_buf->buffer;
}
if (in_atomic) {
/*
* out of reserve buffers and in atomic context so increase
* how many buffers to keep in reserve
*/
reserve_count++;
flags = GFP_ATOMIC;
}
spin_unlock(&aa_buffers_lock);
if (!in_atomic)
might_sleep();
aa_buf = kmalloc(aa_g_path_max, flags);
if (!aa_buf) {
if (try_again) {
try_again = false;
goto retry;
}
pr_warn_once("AppArmor: Failed to allocate a memory buffer.\n");
return NULL;
}
return aa_buf->buffer;
}
void aa_put_buffer(char *buf)
{
union aa_buffer *aa_buf;
if (!buf)
return;
aa_buf = container_of(buf, union aa_buffer, buffer[0]);
spin_lock(&aa_buffers_lock);
list_add(&aa_buf->list, &aa_global_buffers);
buffer_count++;
spin_unlock(&aa_buffers_lock);
}
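/*
 * Illustrative sketch (not part of the original source): the expected
 * pairing of aa_get_buffer()/aa_put_buffer() by a hypothetical caller.
 * The helper name below is made up purely for illustration.
 *
 *	static int example_use_path_buffer(void)
 *	{
 *		char *buffer = aa_get_buffer(false);	// may sleep
 *
 *		if (!buffer)
 *			return -ENOMEM;
 *		// ... use up to aa_g_path_max bytes as scratch space ...
 *		aa_put_buffer(buffer);			// return buffer to the pool
 *		return 0;
 *	}
 */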
/*
* AppArmor init functions
*/
/**
* set_init_ctx - set a task context and profile on the first task.
*
 * TODO: allow setting a profile other than unconfined
*/
static int __init set_init_ctx(void)
{
struct cred *cred = (__force struct cred *)current->real_cred;
set_cred_label(cred, aa_get_label(ns_unconfined(root_ns)));
return 0;
}
static void destroy_buffers(void)
{
union aa_buffer *aa_buf;
spin_lock(&aa_buffers_lock);
while (!list_empty(&aa_global_buffers)) {
aa_buf = list_first_entry(&aa_global_buffers, union aa_buffer,
list);
list_del(&aa_buf->list);
spin_unlock(&aa_buffers_lock);
kfree(aa_buf);
spin_lock(&aa_buffers_lock);
}
spin_unlock(&aa_buffers_lock);
}
static int __init alloc_buffers(void)
{
union aa_buffer *aa_buf;
int i, num;
/*
* A function may require two buffers at once. Usually the buffers are
 * used for a short period of time and are shared. On a UP kernel two
 * buffers should be enough; with more CPUs it is possible that more
 * buffers will be used simultaneously. The preallocated pool may grow.
 * This preallocation also has the side effect that AppArmor will be
 * disabled early at boot if aa_g_path_max is extremely high.
*/
if (num_online_cpus() > 1)
num = 4 + RESERVE_COUNT;
else
num = 2 + RESERVE_COUNT;
for (i = 0; i < num; i++) {
aa_buf = kmalloc(aa_g_path_max, GFP_KERNEL |
__GFP_RETRY_MAYFAIL | __GFP_NOWARN);
if (!aa_buf) {
destroy_buffers();
return -ENOMEM;
}
aa_put_buffer(aa_buf->buffer);
}
return 0;
}
#ifdef CONFIG_SYSCTL
static int apparmor_dointvec(struct ctl_table *table, int write,
void *buffer, size_t *lenp, loff_t *ppos)
{
if (!aa_current_policy_admin_capable(NULL))
return -EPERM;
if (!apparmor_enabled)
return -EINVAL;
return proc_dointvec(table, write, buffer, lenp, ppos);
}
static struct ctl_table apparmor_sysctl_table[] = {
{
.procname = "unprivileged_userns_apparmor_policy",
.data = &unprivileged_userns_apparmor_policy,
.maxlen = sizeof(int),
.mode = 0600,
.proc_handler = apparmor_dointvec,
},
{
.procname = "apparmor_display_secid_mode",
.data = &apparmor_display_secid_mode,
.maxlen = sizeof(int),
.mode = 0600,
.proc_handler = apparmor_dointvec,
},
{ }
};
static int __init apparmor_init_sysctl(void)
{
return register_sysctl("kernel", apparmor_sysctl_table) ? 0 : -ENOMEM;
}
#else
static inline int apparmor_init_sysctl(void)
{
return 0;
}
#endif /* CONFIG_SYSCTL */
#if defined(CONFIG_NETFILTER) && defined(CONFIG_NETWORK_SECMARK)
static unsigned int apparmor_ip_postroute(void *priv,
struct sk_buff *skb,
const struct nf_hook_state *state)
{
struct aa_sk_ctx *ctx;
struct sock *sk;
if (!skb->secmark)
return NF_ACCEPT;
sk = skb_to_full_sk(skb);
if (sk == NULL)
return NF_ACCEPT;
ctx = SK_CTX(sk);
if (!apparmor_secmark_check(ctx->label, OP_SENDMSG, AA_MAY_SEND,
skb->secmark, sk))
return NF_ACCEPT;
return NF_DROP_ERR(-ECONNREFUSED);
}
static const struct nf_hook_ops apparmor_nf_ops[] = {
{
.hook = apparmor_ip_postroute,
.pf = NFPROTO_IPV4,
.hooknum = NF_INET_POST_ROUTING,
.priority = NF_IP_PRI_SELINUX_FIRST,
},
#if IS_ENABLED(CONFIG_IPV6)
{
.hook = apparmor_ip_postroute,
.pf = NFPROTO_IPV6,
.hooknum = NF_INET_POST_ROUTING,
.priority = NF_IP6_PRI_SELINUX_FIRST,
},
#endif
};
static int __net_init apparmor_nf_register(struct net *net)
{
return nf_register_net_hooks(net, apparmor_nf_ops,
ARRAY_SIZE(apparmor_nf_ops));
}
static void __net_exit apparmor_nf_unregister(struct net *net)
{
nf_unregister_net_hooks(net, apparmor_nf_ops,
ARRAY_SIZE(apparmor_nf_ops));
}
static struct pernet_operations apparmor_net_ops = {
.init = apparmor_nf_register,
.exit = apparmor_nf_unregister,
};
static int __init apparmor_nf_ip_init(void)
{
int err;
if (!apparmor_enabled)
return 0;
err = register_pernet_subsys(&apparmor_net_ops);
if (err)
panic("Apparmor: register_pernet_subsys: error %d\n", err);
return 0;
}
__initcall(apparmor_nf_ip_init);
#endif
static int __init apparmor_init(void)
{
int error;
error = aa_setup_dfa_engine();
if (error) {
AA_ERROR("Unable to setup dfa engine\n");
goto alloc_out;
}
error = aa_alloc_root_ns();
if (error) {
AA_ERROR("Unable to allocate default profile namespace\n");
goto alloc_out;
}
error = apparmor_init_sysctl();
if (error) {
AA_ERROR("Unable to register sysctls\n");
goto alloc_out;
}
error = alloc_buffers();
if (error) {
AA_ERROR("Unable to allocate work buffers\n");
goto alloc_out;
}
error = set_init_ctx();
if (error) {
AA_ERROR("Failed to set context on init task\n");
aa_free_root_ns();
goto buffers_out;
}
security_add_hooks(apparmor_hooks, ARRAY_SIZE(apparmor_hooks),
"apparmor");
/* Report that AppArmor successfully initialized */
apparmor_initialized = 1;
if (aa_g_profile_mode == APPARMOR_COMPLAIN)
aa_info_message("AppArmor initialized: complain mode enabled");
else if (aa_g_profile_mode == APPARMOR_KILL)
aa_info_message("AppArmor initialized: kill mode enabled");
else
aa_info_message("AppArmor initialized");
return error;
buffers_out:
destroy_buffers();
alloc_out:
aa_destroy_aafs();
aa_teardown_dfa_engine();
apparmor_enabled = false;
return error;
}
DEFINE_LSM(apparmor) = {
.name = "apparmor",
.flags = LSM_FLAG_LEGACY_MAJOR | LSM_FLAG_EXCLUSIVE,
.enabled = &apparmor_enabled,
.blobs = &apparmor_blob_sizes,
.init = apparmor_init,
};
| linux-master | security/apparmor/lsm.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* AppArmor security module
*
* This file contains basic common functions used in AppArmor
*
* Copyright (C) 1998-2008 Novell/SUSE
* Copyright 2009-2010 Canonical Ltd.
*/
#include <linux/ctype.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/vmalloc.h>
#include "include/audit.h"
#include "include/apparmor.h"
#include "include/lib.h"
#include "include/perms.h"
#include "include/policy.h"
struct aa_perms nullperms;
struct aa_perms allperms = { .allow = ALL_PERMS_MASK,
.quiet = ALL_PERMS_MASK,
.hide = ALL_PERMS_MASK };
/**
 * aa_free_str_table - free the entries in a string table
 * @t: the string table to free (MAYBE NULL)
*/
void aa_free_str_table(struct aa_str_table *t)
{
int i;
if (t) {
if (!t->table)
return;
for (i = 0; i < t->size; i++)
kfree_sensitive(t->table[i]);
kfree_sensitive(t->table);
t->table = NULL;
}
}
/**
* aa_split_fqname - split a fqname into a profile and namespace name
* @fqname: a full qualified name in namespace profile format (NOT NULL)
* @ns_name: pointer to portion of the string containing the ns name (NOT NULL)
*
* Returns: profile name or NULL if one is not specified
*
* Split a namespace name from a profile name (see policy.c for naming
* description). If a portion of the name is missing it returns NULL for
* that portion.
*
* NOTE: may modify the @fqname string. The pointers returned point
* into the @fqname string.
*/
char *aa_split_fqname(char *fqname, char **ns_name)
{
char *name = strim(fqname);
*ns_name = NULL;
if (name[0] == ':') {
char *split = strchr(&name[1], ':');
*ns_name = skip_spaces(&name[1]);
if (split) {
/* overwrite ':' with \0 */
*split++ = 0;
if (strncmp(split, "//", 2) == 0)
split += 2;
name = skip_spaces(split);
} else
/* a ns name without a following profile is allowed */
name = NULL;
}
if (name && *name == 0)
name = NULL;
return name;
}
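/*
 * Illustrative examples (not part of the original source), assuming the
 * fqname formats described above:
 *
 *	":ns://profile"	-> *ns_name = "ns",  returns "profile"
 *	":ns"		-> *ns_name = "ns",  returns NULL
 *	"profile"	-> *ns_name = NULL,  returns "profile"
 *
 * Remember the string may be modified in place and the returned pointers
 * point into @fqname.
 */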
/**
 * skipn_spaces - Removes leading whitespace from @str.
 * @str: The string to be stripped.
 * @n: maximum number of characters to examine
 *
 * Returns a pointer to the first non-whitespace character in @str, or
 * NULL if the first @n characters are all whitespace.
*/
const char *skipn_spaces(const char *str, size_t n)
{
for (; n && isspace(*str); --n)
++str;
if (n)
return (char *)str;
return NULL;
}
const char *aa_splitn_fqname(const char *fqname, size_t n, const char **ns_name,
size_t *ns_len)
{
const char *end = fqname + n;
const char *name = skipn_spaces(fqname, n);
*ns_name = NULL;
*ns_len = 0;
if (!name)
return NULL;
if (name[0] == ':') {
char *split = strnchr(&name[1], end - &name[1], ':');
*ns_name = skipn_spaces(&name[1], end - &name[1]);
if (!*ns_name)
return NULL;
if (split) {
*ns_len = split - *ns_name;
if (*ns_len == 0)
*ns_name = NULL;
split++;
if (end - split > 1 && strncmp(split, "//", 2) == 0)
split += 2;
name = skipn_spaces(split, end - split);
} else {
/* a ns name without a following profile is allowed */
name = NULL;
*ns_len = end - *ns_name;
}
}
if (name && *name == 0)
name = NULL;
return name;
}
/**
 * aa_info_message - log a non-profile-related status message
* @str: message to log
*/
void aa_info_message(const char *str)
{
if (audit_enabled) {
DEFINE_AUDIT_DATA(sa, LSM_AUDIT_DATA_NONE, AA_CLASS_NONE, NULL);
aad(&sa)->info = str;
aa_audit_msg(AUDIT_APPARMOR_STATUS, &sa, NULL);
}
printk(KERN_INFO "AppArmor: %s\n", str);
}
__counted char *aa_str_alloc(int size, gfp_t gfp)
{
struct counted_str *str;
str = kmalloc(struct_size(str, name, size), gfp);
if (!str)
return NULL;
kref_init(&str->count);
return str->name;
}
void aa_str_kref(struct kref *kref)
{
kfree(container_of(kref, struct counted_str, count));
}
const char aa_file_perm_chrs[] = "xwracd km l ";
const char *aa_file_perm_names[] = {
"exec",
"write",
"read",
"append",
"create",
"delete",
"open",
"rename",
"setattr",
"getattr",
"setcred",
"getcred",
"chmod",
"chown",
"chgrp",
"lock",
"mmap",
"mprot",
"link",
"snapshot",
"unknown",
"unknown",
"unknown",
"unknown",
"unknown",
"unknown",
"unknown",
"unknown",
"stack",
"change_onexec",
"change_profile",
"change_hat",
};
/**
* aa_perm_mask_to_str - convert a perm mask to its short string
* @str: character buffer to store string in (at least 10 characters)
* @str_size: size of the @str buffer
* @chrs: NUL-terminated character buffer of permission characters
* @mask: permission mask to convert
*/
void aa_perm_mask_to_str(char *str, size_t str_size, const char *chrs, u32 mask)
{
unsigned int i, perm = 1;
size_t num_chrs = strlen(chrs);
for (i = 0; i < num_chrs; perm <<= 1, i++) {
if (mask & perm) {
/* Ensure that one byte is left for NUL-termination */
if (WARN_ON_ONCE(str_size <= 1))
break;
*str++ = chrs[i];
str_size--;
}
}
*str = '\0';
}
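/*
 * Illustrative example (not part of the original source): given the
 * aa_file_perm_chrs table below, a mask with only the write (bit 1) and
 * read (bit 2) permission bits set is rendered as the string "wr";
 * characters are emitted in bit order, lowest bit first.
 */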
void aa_audit_perm_names(struct audit_buffer *ab, const char * const *names,
u32 mask)
{
const char *fmt = "%s";
unsigned int i, perm = 1;
bool prev = false;
for (i = 0; i < 32; perm <<= 1, i++) {
if (mask & perm) {
audit_log_format(ab, fmt, names[i]);
if (!prev) {
prev = true;
fmt = " %s";
}
}
}
}
void aa_audit_perm_mask(struct audit_buffer *ab, u32 mask, const char *chrs,
u32 chrsmask, const char * const *names, u32 namesmask)
{
char str[33];
audit_log_format(ab, "\"");
if ((mask & chrsmask) && chrs) {
aa_perm_mask_to_str(str, sizeof(str), chrs, mask & chrsmask);
mask &= ~chrsmask;
audit_log_format(ab, "%s", str);
if (mask & namesmask)
audit_log_format(ab, " ");
}
if ((mask & namesmask) && names)
aa_audit_perm_names(ab, names, mask & namesmask);
audit_log_format(ab, "\"");
}
/**
* aa_audit_perms_cb - generic callback fn for auditing perms
* @ab: audit buffer (NOT NULL)
* @va: audit struct to audit values of (NOT NULL)
*/
static void aa_audit_perms_cb(struct audit_buffer *ab, void *va)
{
struct common_audit_data *sa = va;
if (aad(sa)->request) {
audit_log_format(ab, " requested_mask=");
aa_audit_perm_mask(ab, aad(sa)->request, aa_file_perm_chrs,
PERMS_CHRS_MASK, aa_file_perm_names,
PERMS_NAMES_MASK);
}
if (aad(sa)->denied) {
audit_log_format(ab, "denied_mask=");
aa_audit_perm_mask(ab, aad(sa)->denied, aa_file_perm_chrs,
PERMS_CHRS_MASK, aa_file_perm_names,
PERMS_NAMES_MASK);
}
audit_log_format(ab, " peer=");
aa_label_xaudit(ab, labels_ns(aad(sa)->label), aad(sa)->peer,
FLAGS_NONE, GFP_ATOMIC);
}
/**
* aa_apply_modes_to_perms - apply namespace and profile flags to perms
 * @profile: the profile the permissions were computed from
* @perms: perms to apply mode modifiers to
*
* TODO: split into profile and ns based flags for when accumulating perms
*/
void aa_apply_modes_to_perms(struct aa_profile *profile, struct aa_perms *perms)
{
switch (AUDIT_MODE(profile)) {
case AUDIT_ALL:
perms->audit = ALL_PERMS_MASK;
fallthrough;
case AUDIT_NOQUIET:
perms->quiet = 0;
break;
case AUDIT_QUIET:
perms->audit = 0;
fallthrough;
case AUDIT_QUIET_DENIED:
perms->quiet = ALL_PERMS_MASK;
break;
}
if (KILL_MODE(profile))
perms->kill = ALL_PERMS_MASK;
else if (COMPLAIN_MODE(profile))
perms->complain = ALL_PERMS_MASK;
else if (USER_MODE(profile))
perms->prompt = ALL_PERMS_MASK;
}
void aa_profile_match_label(struct aa_profile *profile,
struct aa_ruleset *rules,
struct aa_label *label,
int type, u32 request, struct aa_perms *perms)
{
/* TODO: doesn't yet handle extended types */
aa_state_t state;
state = aa_dfa_next(rules->policy.dfa,
rules->policy.start[AA_CLASS_LABEL],
type);
aa_label_match(profile, rules, label, state, false, request, perms);
}
/* currently unused */
int aa_profile_label_perm(struct aa_profile *profile, struct aa_profile *target,
u32 request, int type, u32 *deny,
struct common_audit_data *sa)
{
struct aa_ruleset *rules = list_first_entry(&profile->rules,
typeof(*rules), list);
struct aa_perms perms;
aad(sa)->label = &profile->label;
aad(sa)->peer = &target->label;
aad(sa)->request = request;
aa_profile_match_label(profile, rules, &target->label, type, request,
&perms);
aa_apply_modes_to_perms(profile, &perms);
*deny |= request & perms.deny;
return aa_check_perms(profile, &perms, request, sa, aa_audit_perms_cb);
}
/**
* aa_check_perms - do audit mode selection based on perms set
* @profile: profile being checked
* @perms: perms computed for the request
* @request: requested perms
 * @sa: initialized audit structure (MAY BE NULL if not auditing)
 * @cb: callback fn for type specific fields (MAY BE NULL)
 *
 * Returns: 0 if the request is allowed else error code
*
* Note: profile audit modes need to be set before calling by setting the
* perm masks appropriately.
*
* If not auditing then complain mode is not enabled and the
* error code will indicate whether there was an explicit deny
* with a positive value.
*/
int aa_check_perms(struct aa_profile *profile, struct aa_perms *perms,
u32 request, struct common_audit_data *sa,
void (*cb)(struct audit_buffer *, void *))
{
int type, error;
u32 denied = request & (~perms->allow | perms->deny);
if (likely(!denied)) {
/* mask off perms that are not being force audited */
request &= perms->audit;
if (!request || !sa)
return 0;
type = AUDIT_APPARMOR_AUDIT;
error = 0;
} else {
error = -EACCES;
if (denied & perms->kill)
type = AUDIT_APPARMOR_KILL;
else if (denied == (denied & perms->complain))
type = AUDIT_APPARMOR_ALLOWED;
else
type = AUDIT_APPARMOR_DENIED;
if (denied == (denied & perms->hide))
error = -ENOENT;
denied &= ~perms->quiet;
if (!sa || !denied)
return error;
}
if (sa) {
aad(sa)->label = &profile->label;
aad(sa)->request = request;
aad(sa)->denied = denied;
aad(sa)->error = error;
aa_audit_msg(type, sa, cb);
}
if (type == AUDIT_APPARMOR_ALLOWED)
error = 0;
return error;
}
/**
* aa_policy_init - initialize a policy structure
* @policy: policy to initialize (NOT NULL)
* @prefix: prefix name if any is required. (MAYBE NULL)
* @name: name of the policy, init will make a copy of it (NOT NULL)
* @gfp: allocation mode
*
* Note: this fn creates a copy of strings passed in
*
* Returns: true if policy init successful
*/
bool aa_policy_init(struct aa_policy *policy, const char *prefix,
const char *name, gfp_t gfp)
{
char *hname;
/* freed by policy_free */
if (prefix) {
hname = aa_str_alloc(strlen(prefix) + strlen(name) + 3, gfp);
if (hname)
sprintf(hname, "%s//%s", prefix, name);
} else {
hname = aa_str_alloc(strlen(name) + 1, gfp);
if (hname)
strcpy(hname, name);
}
if (!hname)
return false;
policy->hname = hname;
/* base.name is a substring of fqname */
policy->name = basename(policy->hname);
INIT_LIST_HEAD(&policy->list);
INIT_LIST_HEAD(&policy->profiles);
return true;
}
/**
* aa_policy_destroy - free the elements referenced by @policy
* @policy: policy that is to have its elements freed (NOT NULL)
*/
void aa_policy_destroy(struct aa_policy *policy)
{
AA_BUG(on_list_rcu(&policy->profiles));
AA_BUG(on_list_rcu(&policy->list));
/* don't free name as it's a subset of hname */
aa_put_str(policy->hname);
}
| linux-master | security/apparmor/lib.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* AppArmor security module
*
* This file contains AppArmor security identifier (secid) manipulation fns
*
* Copyright 2009-2017 Canonical Ltd.
*
* AppArmor allocates a unique secid for every label used. If a label
* is replaced it receives the secid of the label it is replacing.
*/
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/gfp.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/xarray.h>
#include "include/cred.h"
#include "include/lib.h"
#include "include/secid.h"
#include "include/label.h"
#include "include/policy_ns.h"
/*
* secids - do not pin labels with a refcount. They rely on the label
* properly updating/freeing them
*/
#define AA_FIRST_SECID 2
static DEFINE_XARRAY_FLAGS(aa_secids, XA_FLAGS_LOCK_IRQ | XA_FLAGS_TRACK_FREE);
int apparmor_display_secid_mode;
/*
* TODO: allow policy to reserve a secid range?
* TODO: add secid pinning
* TODO: use secid_update in label replace
*/
/**
* aa_secid_update - update a secid mapping to a new label
* @secid: secid to update
* @label: label the secid will now map to
*/
void aa_secid_update(u32 secid, struct aa_label *label)
{
unsigned long flags;
xa_lock_irqsave(&aa_secids, flags);
__xa_store(&aa_secids, secid, label, 0);
xa_unlock_irqrestore(&aa_secids, flags);
}
/*
* see label for inverse aa_label_to_secid
*/
struct aa_label *aa_secid_to_label(u32 secid)
{
return xa_load(&aa_secids, secid);
}
int apparmor_secid_to_secctx(u32 secid, char **secdata, u32 *seclen)
{
/* TODO: cache secctx and ref count so we don't have to recreate */
struct aa_label *label = aa_secid_to_label(secid);
int flags = FLAG_VIEW_SUBNS | FLAG_HIDDEN_UNCONFINED | FLAG_ABS_ROOT;
int len;
AA_BUG(!seclen);
if (!label)
return -EINVAL;
if (apparmor_display_secid_mode)
flags |= FLAG_SHOW_MODE;
if (secdata)
len = aa_label_asxprint(secdata, root_ns, label,
flags, GFP_ATOMIC);
else
len = aa_label_snxprint(NULL, 0, root_ns, label, flags);
if (len < 0)
return -ENOMEM;
*seclen = len;
return 0;
}
int apparmor_secctx_to_secid(const char *secdata, u32 seclen, u32 *secid)
{
struct aa_label *label;
label = aa_label_strn_parse(&root_ns->unconfined->label, secdata,
seclen, GFP_KERNEL, false, false);
if (IS_ERR(label))
return PTR_ERR(label);
*secid = label->secid;
return 0;
}
void apparmor_release_secctx(char *secdata, u32 seclen)
{
kfree(secdata);
}
/**
* aa_alloc_secid - allocate a new secid for a profile
* @label: the label to allocate a secid for
* @gfp: memory allocation flags
*
* Returns: 0 with @label->secid initialized
* <0 returns error with @label->secid set to AA_SECID_INVALID
*/
int aa_alloc_secid(struct aa_label *label, gfp_t gfp)
{
unsigned long flags;
int ret;
xa_lock_irqsave(&aa_secids, flags);
ret = __xa_alloc(&aa_secids, &label->secid, label,
XA_LIMIT(AA_FIRST_SECID, INT_MAX), gfp);
xa_unlock_irqrestore(&aa_secids, flags);
if (ret < 0) {
label->secid = AA_SECID_INVALID;
return ret;
}
return 0;
}
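/*
 * Illustrative lifecycle sketch (not part of the original source) for a
 * label's secid, using the helpers in this file:
 *
 *	err = aa_alloc_secid(label, GFP_KERNEL);   // label->secid now valid
 *	...
 *	aa_secid_update(label->secid, new_label);  // e.g. when a replacement
 *						   // label takes over the secid
 *	...
 *	aa_free_secid(label->secid);               // when the label is freed
 */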
/**
* aa_free_secid - free a secid
* @secid: secid to free
*/
void aa_free_secid(u32 secid)
{
unsigned long flags;
xa_lock_irqsave(&aa_secids, flags);
__xa_erase(&aa_secids, secid);
xa_unlock_irqrestore(&aa_secids, flags);
}
| linux-master | security/apparmor/secid.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* AppArmor security module
*
* This file contains AppArmor resource mediation and attachment
*
* Copyright (C) 1998-2008 Novell/SUSE
* Copyright 2009-2010 Canonical Ltd.
*/
#include <linux/audit.h>
#include <linux/security.h>
#include "include/audit.h"
#include "include/cred.h"
#include "include/resource.h"
#include "include/policy.h"
/*
* Table of rlimit names: we generate it from resource.h.
*/
#include "rlim_names.h"
struct aa_sfs_entry aa_sfs_entry_rlimit[] = {
AA_SFS_FILE_STRING("mask", AA_SFS_RLIMIT_MASK),
{ }
};
/* audit callback for resource specific fields */
static void audit_cb(struct audit_buffer *ab, void *va)
{
struct common_audit_data *sa = va;
audit_log_format(ab, " rlimit=%s value=%lu",
rlim_names[aad(sa)->rlim.rlim], aad(sa)->rlim.max);
if (aad(sa)->peer) {
audit_log_format(ab, " peer=");
aa_label_xaudit(ab, labels_ns(aad(sa)->label), aad(sa)->peer,
FLAGS_NONE, GFP_ATOMIC);
}
}
/**
* audit_resource - audit setting resource limit
* @profile: profile being enforced (NOT NULL)
 * @resource: rlimit being audited
 * @value: value being set
 * @peer: aa_label of the task being set
 * @info: info message about the operation being audited
 * @error: error value
 *
 * Returns: 0 or sa->error if successful, else other error code on failure
*/
static int audit_resource(struct aa_profile *profile, unsigned int resource,
unsigned long value, struct aa_label *peer,
const char *info, int error)
{
DEFINE_AUDIT_DATA(sa, LSM_AUDIT_DATA_NONE, AA_CLASS_RLIMITS,
OP_SETRLIMIT);
aad(&sa)->rlim.rlim = resource;
aad(&sa)->rlim.max = value;
aad(&sa)->peer = peer;
aad(&sa)->info = info;
aad(&sa)->error = error;
return aa_audit(AUDIT_APPARMOR_AUTO, profile, &sa, audit_cb);
}
/**
* aa_map_resource - map compiled policy resource to internal #
* @resource: flattened policy resource number
*
* Returns: resource # for the current architecture.
*
* rlimit resource can vary based on architecture, map the compiled policy
* resource # to the internal representation for the architecture.
*/
int aa_map_resource(int resource)
{
return rlim_map[resource];
}
static int profile_setrlimit(struct aa_profile *profile, unsigned int resource,
struct rlimit *new_rlim)
{
struct aa_ruleset *rules = list_first_entry(&profile->rules,
typeof(*rules), list);
int e = 0;
if (rules->rlimits.mask & (1 << resource) && new_rlim->rlim_max >
rules->rlimits.limits[resource].rlim_max)
e = -EACCES;
return audit_resource(profile, resource, new_rlim->rlim_max, NULL, NULL,
e);
}
/**
* aa_task_setrlimit - test permission to set an rlimit
 * @label: label confining the task (NOT NULL)
 * @task: task the resource is being set on
 * @resource: the resource being set
 * @new_rlim: the new resource limit (NOT NULL)
 *
 * Control raising the process's hard limit.
*
* Returns: 0 or error code if setting resource failed
*/
int aa_task_setrlimit(struct aa_label *label, struct task_struct *task,
unsigned int resource, struct rlimit *new_rlim)
{
struct aa_profile *profile;
struct aa_label *peer;
int error = 0;
rcu_read_lock();
peer = aa_get_newest_cred_label(__task_cred(task));
rcu_read_unlock();
/* TODO: extend resource control to handle other (non current)
* profiles. AppArmor rules currently have the implicit assumption
* that the task is setting the resource of a task confined with
* the same profile or that the task setting the resource of another
* task has CAP_SYS_RESOURCE.
*/
if (label != peer &&
aa_capable(label, CAP_SYS_RESOURCE, CAP_OPT_NOAUDIT) != 0)
error = fn_for_each(label, profile,
audit_resource(profile, resource,
new_rlim->rlim_max, peer,
"cap_sys_resource", -EACCES));
else
error = fn_for_each_confined(label, profile,
profile_setrlimit(profile, resource, new_rlim));
aa_put_label(peer);
return error;
}
/**
* __aa_transition_rlimits - apply new profile rlimits
* @old_l: old label on task (NOT NULL)
* @new_l: new label with rlimits to apply (NOT NULL)
*/
void __aa_transition_rlimits(struct aa_label *old_l, struct aa_label *new_l)
{
unsigned int mask = 0;
struct rlimit *rlim, *initrlim;
struct aa_profile *old, *new;
struct label_it i;
old = labels_profile(old_l);
new = labels_profile(new_l);
/* for any rlimits the profile controlled, reset the soft limit
 * to the lesser of the task's hard limit and the init task's soft limit
*/
label_for_each_confined(i, old_l, old) {
struct aa_ruleset *rules = list_first_entry(&old->rules,
typeof(*rules),
list);
if (rules->rlimits.mask) {
int j;
for (j = 0, mask = 1; j < RLIM_NLIMITS; j++,
mask <<= 1) {
if (rules->rlimits.mask & mask) {
rlim = current->signal->rlim + j;
initrlim = init_task.signal->rlim + j;
rlim->rlim_cur = min(rlim->rlim_max,
initrlim->rlim_cur);
}
}
}
}
/* set any new hard limits as dictated by the new profile */
label_for_each_confined(i, new_l, new) {
struct aa_ruleset *rules = list_first_entry(&new->rules,
typeof(*rules),
list);
int j;
if (!rules->rlimits.mask)
continue;
for (j = 0, mask = 1; j < RLIM_NLIMITS; j++, mask <<= 1) {
if (!(rules->rlimits.mask & mask))
continue;
rlim = current->signal->rlim + j;
rlim->rlim_max = min(rlim->rlim_max,
rules->rlimits.limits[j].rlim_max);
/* soft limit should not exceed hard limit */
rlim->rlim_cur = min(rlim->rlim_cur, rlim->rlim_max);
}
}
}
| linux-master | security/apparmor/resource.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* AppArmor security module
*
* This file contains AppArmor functions for unpacking policy loaded from
* userspace.
*
* Copyright (C) 1998-2008 Novell/SUSE
* Copyright 2009-2010 Canonical Ltd.
*
* AppArmor uses a serialized binary format for loading policy. To find
* policy format documentation see Documentation/admin-guide/LSM/apparmor.rst
* All policy is validated before it is used.
*/
#include <asm/unaligned.h>
#include <kunit/visibility.h>
#include <linux/ctype.h>
#include <linux/errno.h>
#include <linux/zstd.h>
#include "include/apparmor.h"
#include "include/audit.h"
#include "include/cred.h"
#include "include/crypto.h"
#include "include/file.h"
#include "include/match.h"
#include "include/path.h"
#include "include/policy.h"
#include "include/policy_unpack.h"
#include "include/policy_compat.h"
/* audit callback for unpack fields */
static void audit_cb(struct audit_buffer *ab, void *va)
{
struct common_audit_data *sa = va;
if (aad(sa)->iface.ns) {
audit_log_format(ab, " ns=");
audit_log_untrustedstring(ab, aad(sa)->iface.ns);
}
if (aad(sa)->name) {
audit_log_format(ab, " name=");
audit_log_untrustedstring(ab, aad(sa)->name);
}
if (aad(sa)->iface.pos)
audit_log_format(ab, " offset=%ld", aad(sa)->iface.pos);
}
/**
* audit_iface - do audit message for policy unpacking/load/replace/remove
* @new: profile if it has been allocated (MAYBE NULL)
* @ns_name: name of the ns the profile is to be loaded to (MAY BE NULL)
* @name: name of the profile being manipulated (MAYBE NULL)
* @info: any extra info about the failure (MAYBE NULL)
* @e: buffer position info
* @error: error code
*
* Returns: %0 or error
*/
static int audit_iface(struct aa_profile *new, const char *ns_name,
const char *name, const char *info, struct aa_ext *e,
int error)
{
struct aa_profile *profile = labels_profile(aa_current_raw_label());
DEFINE_AUDIT_DATA(sa, LSM_AUDIT_DATA_NONE, AA_CLASS_NONE, NULL);
if (e)
aad(&sa)->iface.pos = e->pos - e->start;
aad(&sa)->iface.ns = ns_name;
if (new)
aad(&sa)->name = new->base.hname;
else
aad(&sa)->name = name;
aad(&sa)->info = info;
aad(&sa)->error = error;
return aa_audit(AUDIT_APPARMOR_STATUS, profile, &sa, audit_cb);
}
void __aa_loaddata_update(struct aa_loaddata *data, long revision)
{
AA_BUG(!data);
AA_BUG(!data->ns);
AA_BUG(!mutex_is_locked(&data->ns->lock));
AA_BUG(data->revision > revision);
data->revision = revision;
if ((data->dents[AAFS_LOADDATA_REVISION])) {
struct inode *inode;
inode = d_inode(data->dents[AAFS_LOADDATA_DIR]);
inode->i_mtime = inode_set_ctime_current(inode);
inode = d_inode(data->dents[AAFS_LOADDATA_REVISION]);
inode->i_mtime = inode_set_ctime_current(inode);
}
}
bool aa_rawdata_eq(struct aa_loaddata *l, struct aa_loaddata *r)
{
if (l->size != r->size)
return false;
if (l->compressed_size != r->compressed_size)
return false;
if (aa_g_hash_policy && memcmp(l->hash, r->hash, aa_hash_size()) != 0)
return false;
return memcmp(l->data, r->data, r->compressed_size ?: r->size) == 0;
}
/*
* need to take the ns mutex lock which is NOT safe most places that
* put_loaddata is called, so we have to delay freeing it
*/
static void do_loaddata_free(struct work_struct *work)
{
struct aa_loaddata *d = container_of(work, struct aa_loaddata, work);
struct aa_ns *ns = aa_get_ns(d->ns);
if (ns) {
mutex_lock_nested(&ns->lock, ns->level);
__aa_fs_remove_rawdata(d);
mutex_unlock(&ns->lock);
aa_put_ns(ns);
}
kfree_sensitive(d->hash);
kfree_sensitive(d->name);
kvfree(d->data);
kfree_sensitive(d);
}
void aa_loaddata_kref(struct kref *kref)
{
struct aa_loaddata *d = container_of(kref, struct aa_loaddata, count);
if (d) {
INIT_WORK(&d->work, do_loaddata_free);
schedule_work(&d->work);
}
}
struct aa_loaddata *aa_loaddata_alloc(size_t size)
{
struct aa_loaddata *d;
d = kzalloc(sizeof(*d), GFP_KERNEL);
if (d == NULL)
return ERR_PTR(-ENOMEM);
d->data = kvzalloc(size, GFP_KERNEL);
if (!d->data) {
kfree(d);
return ERR_PTR(-ENOMEM);
}
kref_init(&d->count);
INIT_LIST_HEAD(&d->list);
return d;
}
/* test if read will be in packed data bounds */
VISIBLE_IF_KUNIT bool aa_inbounds(struct aa_ext *e, size_t size)
{
return (size <= e->end - e->pos);
}
EXPORT_SYMBOL_IF_KUNIT(aa_inbounds);
/**
* aa_unpack_u16_chunk - test and do bounds checking for a u16 size based chunk
* @e: serialized data read head (NOT NULL)
* @chunk: start address for chunk of data (NOT NULL)
*
* Returns: the size of chunk found with the read head at the end of the chunk.
*/
VISIBLE_IF_KUNIT size_t aa_unpack_u16_chunk(struct aa_ext *e, char **chunk)
{
size_t size = 0;
void *pos = e->pos;
if (!aa_inbounds(e, sizeof(u16)))
goto fail;
size = le16_to_cpu(get_unaligned((__le16 *) e->pos));
e->pos += sizeof(__le16);
if (!aa_inbounds(e, size))
goto fail;
*chunk = e->pos;
e->pos += size;
return size;
fail:
e->pos = pos;
return 0;
}
EXPORT_SYMBOL_IF_KUNIT(aa_unpack_u16_chunk);
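/*
 * Illustrative wire layout (not part of the original source) for the
 * u16-sized chunk consumed above, as implied by the unpack logic:
 *
 *	+-----------------+----------------------------+
 *	| u16 size (LE)   | size bytes of chunk data   |
 *	+-----------------+----------------------------+
 *
 * On success the read head ends up just past the chunk data; on a bounds
 * failure it is restored to where it started.
 */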
/* unpack control byte */
VISIBLE_IF_KUNIT bool aa_unpack_X(struct aa_ext *e, enum aa_code code)
{
if (!aa_inbounds(e, 1))
return false;
if (*(u8 *) e->pos != code)
return false;
e->pos++;
return true;
}
EXPORT_SYMBOL_IF_KUNIT(aa_unpack_X);
/**
 * aa_unpack_nameX - check if the next element is of type X with a name of @name
* @e: serialized data extent information (NOT NULL)
* @code: type code
* @name: name to match to the serialized element. (MAYBE NULL)
*
* check that the next serialized data element is of type X and has a tag
* name @name. If @name is specified then there must be a matching
* name element in the stream. If @name is NULL any name element will be
* skipped and only the typecode will be tested.
*
* Returns true on success (both type code and name tests match) and the read
* head is advanced past the headers
*
* Returns: false if either match fails, the read head does not move
*/
VISIBLE_IF_KUNIT bool aa_unpack_nameX(struct aa_ext *e, enum aa_code code, const char *name)
{
/*
* May need to reset pos if name or type doesn't match
*/
void *pos = e->pos;
/*
 * Check for the presence of a tag name, and if present its size.
 * The AA_NAME tag value is a u16.
*/
if (aa_unpack_X(e, AA_NAME)) {
char *tag = NULL;
size_t size = aa_unpack_u16_chunk(e, &tag);
/* if a name is specified it must match. otherwise skip tag */
if (name && (!size || tag[size-1] != '\0' || strcmp(name, tag)))
goto fail;
} else if (name) {
/* if a name is specified and there is no name tag fail */
goto fail;
}
/* now check if type code matches */
if (aa_unpack_X(e, code))
return true;
fail:
e->pos = pos;
return false;
}
EXPORT_SYMBOL_IF_KUNIT(aa_unpack_nameX);
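/*
 * Illustrative layout (not part of the original source) of a named, typed
 * element as checked by aa_unpack_nameX(), derived from the code above:
 *
 *	[AA_NAME byte][u16 tag len (LE)]["tagname\0"][type code byte][payload...]
 *
 * The AA_NAME header portion is optional; when the caller passes a NULL
 * @name, any name element present is simply skipped.
 */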
static bool unpack_u8(struct aa_ext *e, u8 *data, const char *name)
{
void *pos = e->pos;
if (aa_unpack_nameX(e, AA_U8, name)) {
if (!aa_inbounds(e, sizeof(u8)))
goto fail;
if (data)
*data = *((u8 *)e->pos);
e->pos += sizeof(u8);
return true;
}
fail:
e->pos = pos;
return false;
}
VISIBLE_IF_KUNIT bool aa_unpack_u32(struct aa_ext *e, u32 *data, const char *name)
{
void *pos = e->pos;
if (aa_unpack_nameX(e, AA_U32, name)) {
if (!aa_inbounds(e, sizeof(u32)))
goto fail;
if (data)
*data = le32_to_cpu(get_unaligned((__le32 *) e->pos));
e->pos += sizeof(u32);
return true;
}
fail:
e->pos = pos;
return false;
}
EXPORT_SYMBOL_IF_KUNIT(aa_unpack_u32);
VISIBLE_IF_KUNIT bool aa_unpack_u64(struct aa_ext *e, u64 *data, const char *name)
{
void *pos = e->pos;
if (aa_unpack_nameX(e, AA_U64, name)) {
if (!aa_inbounds(e, sizeof(u64)))
goto fail;
if (data)
*data = le64_to_cpu(get_unaligned((__le64 *) e->pos));
e->pos += sizeof(u64);
return true;
}
fail:
e->pos = pos;
return false;
}
EXPORT_SYMBOL_IF_KUNIT(aa_unpack_u64);
static bool aa_unpack_cap_low(struct aa_ext *e, kernel_cap_t *data, const char *name)
{
u32 val;
if (!aa_unpack_u32(e, &val, name))
return false;
data->val = val;
return true;
}
static bool aa_unpack_cap_high(struct aa_ext *e, kernel_cap_t *data, const char *name)
{
u32 val;
if (!aa_unpack_u32(e, &val, name))
return false;
data->val = (u32)data->val | ((u64)val << 32);
return true;
}
VISIBLE_IF_KUNIT bool aa_unpack_array(struct aa_ext *e, const char *name, u16 *size)
{
void *pos = e->pos;
if (aa_unpack_nameX(e, AA_ARRAY, name)) {
if (!aa_inbounds(e, sizeof(u16)))
goto fail;
*size = le16_to_cpu(get_unaligned((__le16 *) e->pos));
e->pos += sizeof(u16);
return true;
}
fail:
e->pos = pos;
return false;
}
EXPORT_SYMBOL_IF_KUNIT(aa_unpack_array);
VISIBLE_IF_KUNIT size_t aa_unpack_blob(struct aa_ext *e, char **blob, const char *name)
{
void *pos = e->pos;
if (aa_unpack_nameX(e, AA_BLOB, name)) {
u32 size;
if (!aa_inbounds(e, sizeof(u32)))
goto fail;
size = le32_to_cpu(get_unaligned((__le32 *) e->pos));
e->pos += sizeof(u32);
if (aa_inbounds(e, (size_t) size)) {
*blob = e->pos;
e->pos += size;
return size;
}
}
fail:
e->pos = pos;
return 0;
}
EXPORT_SYMBOL_IF_KUNIT(aa_unpack_blob);
VISIBLE_IF_KUNIT int aa_unpack_str(struct aa_ext *e, const char **string, const char *name)
{
char *src_str;
size_t size = 0;
void *pos = e->pos;
*string = NULL;
if (aa_unpack_nameX(e, AA_STRING, name)) {
size = aa_unpack_u16_chunk(e, &src_str);
if (size) {
/* strings are null terminated, length is size - 1 */
if (src_str[size - 1] != 0)
goto fail;
*string = src_str;
return size;
}
}
fail:
e->pos = pos;
return 0;
}
EXPORT_SYMBOL_IF_KUNIT(aa_unpack_str);
VISIBLE_IF_KUNIT int aa_unpack_strdup(struct aa_ext *e, char **string, const char *name)
{
const char *tmp;
void *pos = e->pos;
int res = aa_unpack_str(e, &tmp, name);
*string = NULL;
if (!res)
return 0;
*string = kmemdup(tmp, res, GFP_KERNEL);
if (!*string) {
e->pos = pos;
return 0;
}
return res;
}
EXPORT_SYMBOL_IF_KUNIT(aa_unpack_strdup);
/**
* unpack_dfa - unpack a file rule dfa
* @e: serialized data extent information (NOT NULL)
* @flags: dfa flags to check
*
 * Returns: the dfa, an ERR_PTR on failure, or NULL if no dfa is present
*/
static struct aa_dfa *unpack_dfa(struct aa_ext *e, int flags)
{
char *blob = NULL;
size_t size;
struct aa_dfa *dfa = NULL;
size = aa_unpack_blob(e, &blob, "aadfa");
if (size) {
/*
 * The dfa is aligned within the blob to 8 bytes
 * from the beginning of the stream.
 * The alignment adjustment is needed by the dfa unpack.
*/
size_t sz = blob - (char *) e->start -
((e->pos - e->start) & 7);
size_t pad = ALIGN(sz, 8) - sz;
if (aa_g_paranoid_load)
flags |= DFA_FLAG_VERIFY_STATES;
dfa = aa_dfa_unpack(blob + pad, size - pad, flags);
if (IS_ERR(dfa))
return dfa;
}
return dfa;
}
/**
* unpack_trans_table - unpack a profile transition table
* @e: serialized data extent information (NOT NULL)
* @strs: str table to unpack to (NOT NULL)
*
* Returns: true if table successfully unpacked or not present
*/
static bool unpack_trans_table(struct aa_ext *e, struct aa_str_table *strs)
{
void *saved_pos = e->pos;
char **table = NULL;
/* exec table is optional */
if (aa_unpack_nameX(e, AA_STRUCT, "xtable")) {
u16 size;
int i;
if (!aa_unpack_array(e, NULL, &size))
/*
* Note: index into trans table array is a max
* of 2^24, but unpack array can only unpack
* an array of 2^16 in size atm so no need
* for size check here
*/
goto fail;
table = kcalloc(size, sizeof(char *), GFP_KERNEL);
if (!table)
goto fail;
for (i = 0; i < size; i++) {
char *str;
int c, j, pos, size2 = aa_unpack_strdup(e, &str, NULL);
/* aa_unpack_strdup verifies that the last character is
 * a null termination byte.
*/
if (!size2)
goto fail;
table[i] = str;
/* verify that name doesn't start with space */
if (isspace(*str))
goto fail;
/* count # of internal \0 */
for (c = j = 0; j < size2 - 1; j++) {
if (!str[j]) {
pos = j;
c++;
}
}
if (*str == ':') {
/* first character after : must be valid */
if (!str[1])
goto fail;
/* beginning with : requires an embedded \0,
* verify that exactly 1 internal \0 exists
* trailing \0 already verified by aa_unpack_strdup
*
* convert \0 back to : for label_parse
*/
if (c == 1)
str[pos] = ':';
else if (c > 1)
goto fail;
} else if (c)
/* fail - all other cases with embedded \0 */
goto fail;
}
if (!aa_unpack_nameX(e, AA_ARRAYEND, NULL))
goto fail;
if (!aa_unpack_nameX(e, AA_STRUCTEND, NULL))
goto fail;
strs->table = table;
strs->size = size;
}
return true;
fail:
kfree_sensitive(table);
e->pos = saved_pos;
return false;
}
static bool unpack_xattrs(struct aa_ext *e, struct aa_profile *profile)
{
void *pos = e->pos;
if (aa_unpack_nameX(e, AA_STRUCT, "xattrs")) {
u16 size;
int i;
if (!aa_unpack_array(e, NULL, &size))
goto fail;
profile->attach.xattr_count = size;
profile->attach.xattrs = kcalloc(size, sizeof(char *), GFP_KERNEL);
if (!profile->attach.xattrs)
goto fail;
for (i = 0; i < size; i++) {
if (!aa_unpack_strdup(e, &profile->attach.xattrs[i], NULL))
goto fail;
}
if (!aa_unpack_nameX(e, AA_ARRAYEND, NULL))
goto fail;
if (!aa_unpack_nameX(e, AA_STRUCTEND, NULL))
goto fail;
}
return true;
fail:
e->pos = pos;
return false;
}
static bool unpack_secmark(struct aa_ext *e, struct aa_ruleset *rules)
{
void *pos = e->pos;
u16 size;
int i;
if (aa_unpack_nameX(e, AA_STRUCT, "secmark")) {
if (!aa_unpack_array(e, NULL, &size))
goto fail;
rules->secmark = kcalloc(size, sizeof(struct aa_secmark),
GFP_KERNEL);
if (!rules->secmark)
goto fail;
rules->secmark_count = size;
for (i = 0; i < size; i++) {
if (!unpack_u8(e, &rules->secmark[i].audit, NULL))
goto fail;
if (!unpack_u8(e, &rules->secmark[i].deny, NULL))
goto fail;
if (!aa_unpack_strdup(e, &rules->secmark[i].label, NULL))
goto fail;
}
if (!aa_unpack_nameX(e, AA_ARRAYEND, NULL))
goto fail;
if (!aa_unpack_nameX(e, AA_STRUCTEND, NULL))
goto fail;
}
return true;
fail:
if (rules->secmark) {
for (i = 0; i < size; i++)
kfree(rules->secmark[i].label);
kfree(rules->secmark);
rules->secmark_count = 0;
rules->secmark = NULL;
}
e->pos = pos;
return false;
}
static bool unpack_rlimits(struct aa_ext *e, struct aa_ruleset *rules)
{
void *pos = e->pos;
/* rlimits are optional */
if (aa_unpack_nameX(e, AA_STRUCT, "rlimits")) {
u16 size;
int i;
u32 tmp = 0;
if (!aa_unpack_u32(e, &tmp, NULL))
goto fail;
rules->rlimits.mask = tmp;
if (!aa_unpack_array(e, NULL, &size) ||
size > RLIM_NLIMITS)
goto fail;
for (i = 0; i < size; i++) {
u64 tmp2 = 0;
int a = aa_map_resource(i);
if (!aa_unpack_u64(e, &tmp2, NULL))
goto fail;
rules->rlimits.limits[a].rlim_max = tmp2;
}
if (!aa_unpack_nameX(e, AA_ARRAYEND, NULL))
goto fail;
if (!aa_unpack_nameX(e, AA_STRUCTEND, NULL))
goto fail;
}
return true;
fail:
e->pos = pos;
return false;
}
static bool unpack_perm(struct aa_ext *e, u32 version, struct aa_perms *perm)
{
if (version != 1)
return false;
return aa_unpack_u32(e, &perm->allow, NULL) &&
aa_unpack_u32(e, &perm->allow, NULL) &&
aa_unpack_u32(e, &perm->deny, NULL) &&
aa_unpack_u32(e, &perm->subtree, NULL) &&
aa_unpack_u32(e, &perm->cond, NULL) &&
aa_unpack_u32(e, &perm->kill, NULL) &&
aa_unpack_u32(e, &perm->complain, NULL) &&
aa_unpack_u32(e, &perm->prompt, NULL) &&
aa_unpack_u32(e, &perm->audit, NULL) &&
aa_unpack_u32(e, &perm->quiet, NULL) &&
aa_unpack_u32(e, &perm->hide, NULL) &&
aa_unpack_u32(e, &perm->xindex, NULL) &&
aa_unpack_u32(e, &perm->tag, NULL) &&
aa_unpack_u32(e, &perm->label, NULL);
}
static ssize_t unpack_perms_table(struct aa_ext *e, struct aa_perms **perms)
{
void *pos = e->pos;
u16 size = 0;
AA_BUG(!perms);
/*
* policy perms are optional, in which case perms are embedded
* in the dfa accept table
*/
if (aa_unpack_nameX(e, AA_STRUCT, "perms")) {
int i;
u32 version;
if (!aa_unpack_u32(e, &version, "version"))
goto fail_reset;
if (!aa_unpack_array(e, NULL, &size))
goto fail_reset;
*perms = kcalloc(size, sizeof(struct aa_perms), GFP_KERNEL);
if (!*perms)
goto fail_reset;
for (i = 0; i < size; i++) {
if (!unpack_perm(e, version, &(*perms)[i]))
goto fail;
}
if (!aa_unpack_nameX(e, AA_ARRAYEND, NULL))
goto fail;
if (!aa_unpack_nameX(e, AA_STRUCTEND, NULL))
goto fail;
} else
*perms = NULL;
return size;
fail:
kfree(*perms);
fail_reset:
e->pos = pos;
return -EPROTO;
}
static int unpack_pdb(struct aa_ext *e, struct aa_policydb *policy,
bool required_dfa, bool required_trans,
const char **info)
{
void *pos = e->pos;
int i, flags, error = -EPROTO;
ssize_t size;
size = unpack_perms_table(e, &policy->perms);
if (size < 0) {
error = size;
policy->perms = NULL;
*info = "failed to unpack - perms";
goto fail;
}
policy->size = size;
if (policy->perms) {
/* perms table present accept is index */
flags = TO_ACCEPT1_FLAG(YYTD_DATA32);
} else {
/* packed perms in accept1 and accept2 */
flags = TO_ACCEPT1_FLAG(YYTD_DATA32) |
TO_ACCEPT2_FLAG(YYTD_DATA32);
}
policy->dfa = unpack_dfa(e, flags);
if (IS_ERR(policy->dfa)) {
error = PTR_ERR(policy->dfa);
policy->dfa = NULL;
*info = "failed to unpack - dfa";
goto fail;
} else if (!policy->dfa) {
if (required_dfa) {
*info = "missing required dfa";
goto fail;
}
goto out;
}
/*
* only unpack the following if a dfa is present
*
* sadly start was given different names for file and policydb
* but since it is optional we can try both
*/
if (!aa_unpack_u32(e, &policy->start[0], "start"))
/* default start state */
policy->start[0] = DFA_START;
if (!aa_unpack_u32(e, &policy->start[AA_CLASS_FILE], "dfa_start")) {
/* default start state for xmatch and file dfa */
policy->start[AA_CLASS_FILE] = DFA_START;
} /* setup class index */
for (i = AA_CLASS_FILE + 1; i <= AA_CLASS_LAST; i++) {
policy->start[i] = aa_dfa_next(policy->dfa, policy->start[0],
i);
}
if (!unpack_trans_table(e, &policy->trans) && required_trans) {
*info = "failed to unpack profile transition table";
goto fail;
}
/* TODO: move compat mapping here, requires dfa merging first */
/* TODO: move verify here, it has to be done after compat mappings */
out:
return 0;
fail:
e->pos = pos;
return error;
}
static u32 strhash(const void *data, u32 len, u32 seed)
{
const char * const *key = data;
return jhash(*key, strlen(*key), seed);
}
static int datacmp(struct rhashtable_compare_arg *arg, const void *obj)
{
const struct aa_data *data = obj;
const char * const *key = arg->key;
return strcmp(data->key, *key);
}
/**
* unpack_profile - unpack a serialized profile
* @e: serialized data extent information (NOT NULL)
 * @ns_name: Returns - newly allocated copy of the profile's namespace name
 *           if one is specified, else NULL (also NULL on error)
 *
 * Returns: unpacked profile on success, else ERR_PTR on failure
 *
 * NOTE: unpack profile sets audit struct if there is a failure
*/
static struct aa_profile *unpack_profile(struct aa_ext *e, char **ns_name)
{
struct aa_ruleset *rules;
struct aa_profile *profile = NULL;
const char *tmpname, *tmpns = NULL, *name = NULL;
const char *info = "failed to unpack profile";
size_t ns_len;
struct rhashtable_params params = { 0 };
char *key = NULL;
struct aa_data *data;
int error = -EPROTO;
kernel_cap_t tmpcap;
u32 tmp;
*ns_name = NULL;
/* check that we have the right struct being passed */
if (!aa_unpack_nameX(e, AA_STRUCT, "profile"))
goto fail;
if (!aa_unpack_str(e, &name, NULL))
goto fail;
if (*name == '\0')
goto fail;
tmpname = aa_splitn_fqname(name, strlen(name), &tmpns, &ns_len);
if (tmpns) {
*ns_name = kstrndup(tmpns, ns_len, GFP_KERNEL);
if (!*ns_name) {
info = "out of memory";
error = -ENOMEM;
goto fail;
}
name = tmpname;
}
profile = aa_alloc_profile(name, NULL, GFP_KERNEL);
if (!profile) {
info = "out of memory";
error = -ENOMEM;
goto fail;
}
rules = list_first_entry(&profile->rules, typeof(*rules), list);
/* profile renaming is optional */
(void) aa_unpack_str(e, &profile->rename, "rename");
/* attachment string is optional */
(void) aa_unpack_str(e, &profile->attach.xmatch_str, "attach");
/* xmatch is optional and may be NULL */
error = unpack_pdb(e, &profile->attach.xmatch, false, false, &info);
if (error) {
info = "bad xmatch";
goto fail;
}
	/* neither xmatch_len nor xmatch_perms is optional if xmatch is set */
if (profile->attach.xmatch.dfa) {
if (!aa_unpack_u32(e, &tmp, NULL)) {
info = "missing xmatch len";
goto fail;
}
profile->attach.xmatch_len = tmp;
profile->attach.xmatch.start[AA_CLASS_XMATCH] = DFA_START;
if (!profile->attach.xmatch.perms) {
error = aa_compat_map_xmatch(&profile->attach.xmatch);
if (error) {
info = "failed to convert xmatch permission table";
goto fail;
}
}
}
/* disconnected attachment string is optional */
(void) aa_unpack_str(e, &profile->disconnected, "disconnected");
	/* per profile flags: hat, debug bits, mode (complain/enforce/...) and audit */
if (!aa_unpack_nameX(e, AA_STRUCT, "flags")) {
info = "profile missing flags";
goto fail;
}
info = "failed to unpack profile flags";
if (!aa_unpack_u32(e, &tmp, NULL))
goto fail;
if (tmp & PACKED_FLAG_HAT)
profile->label.flags |= FLAG_HAT;
if (tmp & PACKED_FLAG_DEBUG1)
profile->label.flags |= FLAG_DEBUG1;
if (tmp & PACKED_FLAG_DEBUG2)
profile->label.flags |= FLAG_DEBUG2;
if (!aa_unpack_u32(e, &tmp, NULL))
goto fail;
if (tmp == PACKED_MODE_COMPLAIN || (e->version & FORCE_COMPLAIN_FLAG)) {
profile->mode = APPARMOR_COMPLAIN;
} else if (tmp == PACKED_MODE_ENFORCE) {
profile->mode = APPARMOR_ENFORCE;
} else if (tmp == PACKED_MODE_KILL) {
profile->mode = APPARMOR_KILL;
} else if (tmp == PACKED_MODE_UNCONFINED) {
profile->mode = APPARMOR_UNCONFINED;
profile->label.flags |= FLAG_UNCONFINED;
} else if (tmp == PACKED_MODE_USER) {
profile->mode = APPARMOR_USER;
} else {
goto fail;
}
if (!aa_unpack_u32(e, &tmp, NULL))
goto fail;
if (tmp)
profile->audit = AUDIT_ALL;
if (!aa_unpack_nameX(e, AA_STRUCTEND, NULL))
goto fail;
/* path_flags is optional */
if (aa_unpack_u32(e, &profile->path_flags, "path_flags"))
profile->path_flags |= profile->label.flags &
PATH_MEDIATE_DELETED;
else
/* set a default value if path_flags field is not present */
profile->path_flags = PATH_MEDIATE_DELETED;
info = "failed to unpack profile capabilities";
if (!aa_unpack_cap_low(e, &rules->caps.allow, NULL))
goto fail;
if (!aa_unpack_cap_low(e, &rules->caps.audit, NULL))
goto fail;
if (!aa_unpack_cap_low(e, &rules->caps.quiet, NULL))
goto fail;
if (!aa_unpack_cap_low(e, &tmpcap, NULL))
goto fail;
info = "failed to unpack upper profile capabilities";
if (aa_unpack_nameX(e, AA_STRUCT, "caps64")) {
/* optional upper half of 64 bit caps */
if (!aa_unpack_cap_high(e, &rules->caps.allow, NULL))
goto fail;
if (!aa_unpack_cap_high(e, &rules->caps.audit, NULL))
goto fail;
if (!aa_unpack_cap_high(e, &rules->caps.quiet, NULL))
goto fail;
if (!aa_unpack_cap_high(e, &tmpcap, NULL))
goto fail;
if (!aa_unpack_nameX(e, AA_STRUCTEND, NULL))
goto fail;
}
info = "failed to unpack extended profile capabilities";
if (aa_unpack_nameX(e, AA_STRUCT, "capsx")) {
/* optional extended caps mediation mask */
if (!aa_unpack_cap_low(e, &rules->caps.extended, NULL))
goto fail;
if (!aa_unpack_cap_high(e, &rules->caps.extended, NULL))
goto fail;
if (!aa_unpack_nameX(e, AA_STRUCTEND, NULL))
goto fail;
}
if (!unpack_xattrs(e, profile)) {
info = "failed to unpack profile xattrs";
goto fail;
}
if (!unpack_rlimits(e, rules)) {
info = "failed to unpack profile rlimits";
goto fail;
}
if (!unpack_secmark(e, rules)) {
info = "failed to unpack profile secmark rules";
goto fail;
}
if (aa_unpack_nameX(e, AA_STRUCT, "policydb")) {
/* generic policy dfa - optional and may be NULL */
info = "failed to unpack policydb";
error = unpack_pdb(e, &rules->policy, true, false,
&info);
if (error)
goto fail;
/* Fixup: drop when we get rid of start array */
if (aa_dfa_next(rules->policy.dfa, rules->policy.start[0],
AA_CLASS_FILE))
rules->policy.start[AA_CLASS_FILE] =
aa_dfa_next(rules->policy.dfa,
rules->policy.start[0],
AA_CLASS_FILE);
if (!aa_unpack_nameX(e, AA_STRUCTEND, NULL))
goto fail;
if (!rules->policy.perms) {
error = aa_compat_map_policy(&rules->policy,
e->version);
if (error) {
info = "failed to remap policydb permission table";
goto fail;
}
}
} else {
rules->policy.dfa = aa_get_dfa(nulldfa);
rules->policy.perms = kcalloc(2, sizeof(struct aa_perms),
GFP_KERNEL);
if (!rules->policy.perms)
goto fail;
rules->policy.size = 2;
}
/* get file rules */
error = unpack_pdb(e, &rules->file, false, true, &info);
if (error) {
goto fail;
} else if (rules->file.dfa) {
if (!rules->file.perms) {
error = aa_compat_map_file(&rules->file);
if (error) {
info = "failed to remap file permission table";
goto fail;
}
}
} else if (rules->policy.dfa &&
rules->policy.start[AA_CLASS_FILE]) {
rules->file.dfa = aa_get_dfa(rules->policy.dfa);
rules->file.start[AA_CLASS_FILE] = rules->policy.start[AA_CLASS_FILE];
rules->file.perms = kcalloc(rules->policy.size,
sizeof(struct aa_perms),
GFP_KERNEL);
if (!rules->file.perms)
goto fail;
memcpy(rules->file.perms, rules->policy.perms,
rules->policy.size * sizeof(struct aa_perms));
rules->file.size = rules->policy.size;
} else {
rules->file.dfa = aa_get_dfa(nulldfa);
rules->file.perms = kcalloc(2, sizeof(struct aa_perms),
GFP_KERNEL);
if (!rules->file.perms)
goto fail;
rules->file.size = 2;
}
error = -EPROTO;
if (aa_unpack_nameX(e, AA_STRUCT, "data")) {
info = "out of memory";
profile->data = kzalloc(sizeof(*profile->data), GFP_KERNEL);
if (!profile->data) {
error = -ENOMEM;
goto fail;
}
params.nelem_hint = 3;
params.key_len = sizeof(void *);
params.key_offset = offsetof(struct aa_data, key);
params.head_offset = offsetof(struct aa_data, head);
params.hashfn = strhash;
params.obj_cmpfn = datacmp;
		if (rhashtable_init(profile->data, &params)) {
info = "failed to init key, value hash table";
goto fail;
}
while (aa_unpack_strdup(e, &key, NULL)) {
data = kzalloc(sizeof(*data), GFP_KERNEL);
if (!data) {
kfree_sensitive(key);
error = -ENOMEM;
goto fail;
}
data->key = key;
data->size = aa_unpack_blob(e, &data->data, NULL);
data->data = kvmemdup(data->data, data->size, GFP_KERNEL);
if (data->size && !data->data) {
kfree_sensitive(data->key);
kfree_sensitive(data);
error = -ENOMEM;
goto fail;
}
if (rhashtable_insert_fast(profile->data, &data->head,
profile->data->p)) {
kfree_sensitive(data->key);
kfree_sensitive(data);
info = "failed to insert data to table";
goto fail;
}
}
if (!aa_unpack_nameX(e, AA_STRUCTEND, NULL)) {
info = "failed to unpack end of key, value data table";
goto fail;
}
}
if (!aa_unpack_nameX(e, AA_STRUCTEND, NULL)) {
info = "failed to unpack end of profile";
goto fail;
}
return profile;
fail:
if (error == 0)
/* default error covers most cases */
error = -EPROTO;
if (*ns_name) {
kfree(*ns_name);
*ns_name = NULL;
}
if (profile)
name = NULL;
else if (!name)
name = "unknown";
audit_iface(profile, NULL, name, info, e, error);
aa_free_profile(profile);
return ERR_PTR(error);
}
/**
* verify_header - unpack serialized stream header
* @e: serialized data read head (NOT NULL)
* @required: whether the header is required or optional
* @ns: Returns - namespace if one is specified else NULL (NOT NULL)
*
* Returns: error or 0 if header is good
*/
static int verify_header(struct aa_ext *e, int required, const char **ns)
{
int error = -EPROTONOSUPPORT;
const char *name = NULL;
*ns = NULL;
/* get the interface version */
if (!aa_unpack_u32(e, &e->version, "version")) {
if (required) {
audit_iface(NULL, NULL, NULL, "invalid profile format",
e, error);
return error;
}
}
	/* Check that the interface version is currently supported.
	 * If no version was specified, the previously seen version is used.
	 * Mask off everything that is not the kernel abi version.
	 */
if (VERSION_LT(e->version, v5) || VERSION_GT(e->version, v9)) {
audit_iface(NULL, NULL, NULL, "unsupported interface version",
e, error);
return error;
}
/* read the namespace if present */
if (aa_unpack_str(e, &name, "namespace")) {
if (*name == '\0') {
audit_iface(NULL, NULL, NULL, "invalid namespace name",
e, error);
return error;
}
if (*ns && strcmp(*ns, name)) {
audit_iface(NULL, NULL, NULL, "invalid ns change", e,
error);
} else if (!*ns) {
*ns = kstrdup(name, GFP_KERNEL);
if (!*ns)
return -ENOMEM;
}
}
return 0;
}
/**
* verify_dfa_accept_index - verify accept indexes are in range of perms table
* @dfa: the dfa to check accept indexes are in range
 * @table_size: the permission table size the indexes should be within
*/
static bool verify_dfa_accept_index(struct aa_dfa *dfa, int table_size)
{
int i;
for (i = 0; i < dfa->tables[YYTD_ID_ACCEPT]->td_lolen; i++) {
if (ACCEPT_TABLE(dfa)[i] >= table_size)
return false;
}
return true;
}
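/*
 * verify_perm - check that a permission entry's bit sets are internally
 * consistent (e.g. nothing is both allowed and denied)
 */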
static bool verify_perm(struct aa_perms *perm)
{
/* TODO: allow option to just force the perms into a valid state */
if (perm->allow & perm->deny)
return false;
if (perm->subtree & ~perm->allow)
return false;
if (perm->cond & (perm->allow | perm->deny))
return false;
if (perm->kill & perm->allow)
return false;
if (perm->complain & (perm->allow | perm->deny))
return false;
if (perm->prompt & (perm->allow | perm->deny))
return false;
if (perm->complain & perm->prompt)
return false;
if (perm->hide & perm->allow)
return false;
return true;
}
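/*
 * verify_perms - verify each entry in the perms table and check that its
 * x/tag/label indexes fall within the transition string table
 */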
static bool verify_perms(struct aa_policydb *pdb)
{
int i;
for (i = 0; i < pdb->size; i++) {
if (!verify_perm(&pdb->perms[i]))
return false;
/* verify indexes into str table */
if ((pdb->perms[i].xindex & AA_X_TYPE_MASK) == AA_X_TABLE &&
(pdb->perms[i].xindex & AA_X_INDEX_MASK) >= pdb->trans.size)
return false;
if (pdb->perms[i].tag && pdb->perms[i].tag >= pdb->trans.size)
return false;
if (pdb->perms[i].label &&
pdb->perms[i].label >= pdb->trans.size)
return false;
}
return true;
}
/**
* verify_profile - Do post unpack analysis to verify profile consistency
* @profile: profile to verify (NOT NULL)
*
* Returns: 0 if passes verification else error
*
* This verification is post any unpack mapping or changes
*/
static int verify_profile(struct aa_profile *profile)
{
struct aa_ruleset *rules = list_first_entry(&profile->rules,
typeof(*rules), list);
if (!rules)
return 0;
if ((rules->file.dfa && !verify_dfa_accept_index(rules->file.dfa,
rules->file.size)) ||
(rules->policy.dfa &&
!verify_dfa_accept_index(rules->policy.dfa, rules->policy.size))) {
audit_iface(profile, NULL, NULL,
"Unpack: Invalid named transition", NULL, -EPROTO);
return -EPROTO;
}
if (!verify_perms(&rules->file)) {
audit_iface(profile, NULL, NULL,
"Unpack: Invalid perm index", NULL, -EPROTO);
return -EPROTO;
}
if (!verify_perms(&rules->policy)) {
audit_iface(profile, NULL, NULL,
"Unpack: Invalid perm index", NULL, -EPROTO);
return -EPROTO;
}
if (!verify_perms(&profile->attach.xmatch)) {
audit_iface(profile, NULL, NULL,
"Unpack: Invalid perm index", NULL, -EPROTO);
return -EPROTO;
}
return 0;
}
void aa_load_ent_free(struct aa_load_ent *ent)
{
if (ent) {
aa_put_profile(ent->rename);
aa_put_profile(ent->old);
aa_put_profile(ent->new);
kfree(ent->ns_name);
kfree_sensitive(ent);
}
}
struct aa_load_ent *aa_load_ent_alloc(void)
{
struct aa_load_ent *ent = kzalloc(sizeof(*ent), GFP_KERNEL);
if (ent)
INIT_LIST_HEAD(&ent->list);
return ent;
}
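/*
 * compress_zstd - zstd compress @src (@slen bytes) into a newly allocated
 * buffer returned in *@dst, with the compressed length in *@dlen.  When
 * binary export support is not compiled in this only reports the original
 * length.
 */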
static int compress_zstd(const char *src, size_t slen, char **dst, size_t *dlen)
{
#ifdef CONFIG_SECURITY_APPARMOR_EXPORT_BINARY
const zstd_parameters params =
zstd_get_params(aa_g_rawdata_compression_level, slen);
	const size_t wksp_len = zstd_cctx_workspace_bound(&params.cParams);
void *wksp = NULL;
zstd_cctx *ctx = NULL;
size_t out_len = zstd_compress_bound(slen);
void *out = NULL;
int ret = 0;
out = kvzalloc(out_len, GFP_KERNEL);
if (!out) {
ret = -ENOMEM;
goto cleanup;
}
wksp = kvzalloc(wksp_len, GFP_KERNEL);
if (!wksp) {
ret = -ENOMEM;
goto cleanup;
}
ctx = zstd_init_cctx(wksp, wksp_len);
if (!ctx) {
ret = -EINVAL;
goto cleanup;
}
	out_len = zstd_compress_cctx(ctx, out, out_len, src, slen, &params);
if (zstd_is_error(out_len) || out_len >= slen) {
ret = -EINVAL;
goto cleanup;
}
if (is_vmalloc_addr(out)) {
*dst = kvzalloc(out_len, GFP_KERNEL);
if (*dst) {
memcpy(*dst, out, out_len);
kvfree(out);
out = NULL;
}
} else {
/*
* If the staging buffer was kmalloc'd, then using krealloc is
* probably going to be faster. The destination buffer will
* always be smaller, so it's just shrunk, avoiding a memcpy
*/
*dst = krealloc(out, out_len, GFP_KERNEL);
}
if (!*dst) {
ret = -ENOMEM;
goto cleanup;
}
*dlen = out_len;
cleanup:
if (ret) {
kvfree(out);
*dst = NULL;
}
kvfree(wksp);
return ret;
#else
*dlen = slen;
return 0;
#endif
}
static int compress_loaddata(struct aa_loaddata *data)
{
AA_BUG(data->compressed_size > 0);
/*
* Shortcut the no compression case, else we increase the amount of
* storage required by a small amount
*/
if (aa_g_rawdata_compression_level != 0) {
void *udata = data->data;
int error = compress_zstd(udata, data->size, &data->data,
&data->compressed_size);
if (error) {
data->compressed_size = data->size;
return error;
}
if (udata != data->data)
kvfree(udata);
} else
data->compressed_size = data->size;
return 0;
}
/**
* aa_unpack - unpack packed binary profile(s) data loaded from user space
* @udata: user data copied to kmem (NOT NULL)
 * @lh: list to place unpacked profiles in an aa_repl_ws
* @ns: Returns namespace profile is in if specified else NULL (NOT NULL)
*
 * Unpack user data and return refcounted allocated profile(s) stored in
 * @lh in order of discovery, with the list chain stored in base.list,
 * or an error.
*
* Returns: profile(s) on @lh else error pointer if fails to unpack
*/
int aa_unpack(struct aa_loaddata *udata, struct list_head *lh,
const char **ns)
{
struct aa_load_ent *tmp, *ent;
struct aa_profile *profile = NULL;
char *ns_name = NULL;
int error;
struct aa_ext e = {
.start = udata->data,
.end = udata->data + udata->size,
.pos = udata->data,
};
*ns = NULL;
while (e.pos < e.end) {
void *start;
error = verify_header(&e, e.pos == e.start, ns);
if (error)
goto fail;
start = e.pos;
profile = unpack_profile(&e, &ns_name);
if (IS_ERR(profile)) {
error = PTR_ERR(profile);
goto fail;
}
error = verify_profile(profile);
if (error)
goto fail_profile;
if (aa_g_hash_policy)
error = aa_calc_profile_hash(profile, e.version, start,
e.pos - start);
if (error)
goto fail_profile;
ent = aa_load_ent_alloc();
if (!ent) {
error = -ENOMEM;
goto fail_profile;
}
ent->new = profile;
ent->ns_name = ns_name;
ns_name = NULL;
list_add_tail(&ent->list, lh);
}
udata->abi = e.version & K_ABI_MASK;
if (aa_g_hash_policy) {
udata->hash = aa_calc_hash(udata->data, udata->size);
if (IS_ERR(udata->hash)) {
error = PTR_ERR(udata->hash);
udata->hash = NULL;
goto fail;
}
}
if (aa_g_export_binary) {
error = compress_loaddata(udata);
if (error)
goto fail;
}
return 0;
fail_profile:
kfree(ns_name);
aa_put_profile(profile);
fail:
list_for_each_entry_safe(ent, tmp, lh, list) {
list_del_init(&ent->list);
aa_load_ent_free(ent);
}
return error;
}
| linux-master | security/apparmor/policy_unpack.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* KUnit tests for AppArmor's policy unpack.
*/
#include <kunit/test.h>
#include <kunit/visibility.h>
#include "include/policy.h"
#include "include/policy_unpack.h"
#define TEST_STRING_NAME "TEST_STRING"
#define TEST_STRING_DATA "testing"
#define TEST_STRING_BUF_OFFSET \
(3 + strlen(TEST_STRING_NAME) + 1)
#define TEST_U32_NAME "U32_TEST"
#define TEST_U32_DATA ((u32)0x01020304)
#define TEST_NAMED_U32_BUF_OFFSET \
(TEST_STRING_BUF_OFFSET + 3 + strlen(TEST_STRING_DATA) + 1)
#define TEST_U32_BUF_OFFSET \
(TEST_NAMED_U32_BUF_OFFSET + 3 + strlen(TEST_U32_NAME) + 1)
#define TEST_U16_OFFSET (TEST_U32_BUF_OFFSET + 3)
#define TEST_U16_DATA ((u16)(TEST_U32_DATA >> 16))
#define TEST_U64_NAME "U64_TEST"
#define TEST_U64_DATA ((u64)0x0102030405060708)
#define TEST_NAMED_U64_BUF_OFFSET (TEST_U32_BUF_OFFSET + sizeof(u32) + 1)
#define TEST_U64_BUF_OFFSET \
(TEST_NAMED_U64_BUF_OFFSET + 3 + strlen(TEST_U64_NAME) + 1)
#define TEST_BLOB_NAME "BLOB_TEST"
#define TEST_BLOB_DATA "\xde\xad\x00\xbe\xef"
#define TEST_BLOB_DATA_SIZE (ARRAY_SIZE(TEST_BLOB_DATA))
#define TEST_NAMED_BLOB_BUF_OFFSET (TEST_U64_BUF_OFFSET + sizeof(u64) + 1)
#define TEST_BLOB_BUF_OFFSET \
(TEST_NAMED_BLOB_BUF_OFFSET + 3 + strlen(TEST_BLOB_NAME) + 1)
#define TEST_ARRAY_NAME "ARRAY_TEST"
#define TEST_ARRAY_SIZE 16
#define TEST_NAMED_ARRAY_BUF_OFFSET \
(TEST_BLOB_BUF_OFFSET + 5 + TEST_BLOB_DATA_SIZE)
#define TEST_ARRAY_BUF_OFFSET \
(TEST_NAMED_ARRAY_BUF_OFFSET + 3 + strlen(TEST_ARRAY_NAME) + 1)
MODULE_IMPORT_NS(EXPORTED_FOR_KUNIT_TESTING);
struct policy_unpack_fixture {
struct aa_ext *e;
size_t e_size;
};
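/*
 * build_aa_ext_struct - build a synthetic serialized policy buffer holding
 * a named string, u32, u64, blob and array entry at the offsets defined
 * above, for the unpack tests to consume
 */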
static struct aa_ext *build_aa_ext_struct(struct policy_unpack_fixture *puf,
struct kunit *test, size_t buf_size)
{
char *buf;
struct aa_ext *e;
buf = kunit_kzalloc(test, buf_size, GFP_USER);
KUNIT_EXPECT_NOT_ERR_OR_NULL(test, buf);
e = kunit_kmalloc(test, sizeof(*e), GFP_USER);
KUNIT_EXPECT_NOT_ERR_OR_NULL(test, e);
e->start = buf;
e->end = e->start + buf_size;
e->pos = e->start;
*buf = AA_NAME;
*(buf + 1) = strlen(TEST_STRING_NAME) + 1;
strscpy(buf + 3, TEST_STRING_NAME, e->end - (void *)(buf + 3));
buf = e->start + TEST_STRING_BUF_OFFSET;
*buf = AA_STRING;
*(buf + 1) = strlen(TEST_STRING_DATA) + 1;
strscpy(buf + 3, TEST_STRING_DATA, e->end - (void *)(buf + 3));
buf = e->start + TEST_NAMED_U32_BUF_OFFSET;
*buf = AA_NAME;
*(buf + 1) = strlen(TEST_U32_NAME) + 1;
strscpy(buf + 3, TEST_U32_NAME, e->end - (void *)(buf + 3));
*(buf + 3 + strlen(TEST_U32_NAME) + 1) = AA_U32;
*((u32 *)(buf + 3 + strlen(TEST_U32_NAME) + 2)) = TEST_U32_DATA;
buf = e->start + TEST_NAMED_U64_BUF_OFFSET;
*buf = AA_NAME;
*(buf + 1) = strlen(TEST_U64_NAME) + 1;
strscpy(buf + 3, TEST_U64_NAME, e->end - (void *)(buf + 3));
*(buf + 3 + strlen(TEST_U64_NAME) + 1) = AA_U64;
*((u64 *)(buf + 3 + strlen(TEST_U64_NAME) + 2)) = TEST_U64_DATA;
buf = e->start + TEST_NAMED_BLOB_BUF_OFFSET;
*buf = AA_NAME;
*(buf + 1) = strlen(TEST_BLOB_NAME) + 1;
strscpy(buf + 3, TEST_BLOB_NAME, e->end - (void *)(buf + 3));
*(buf + 3 + strlen(TEST_BLOB_NAME) + 1) = AA_BLOB;
*(buf + 3 + strlen(TEST_BLOB_NAME) + 2) = TEST_BLOB_DATA_SIZE;
memcpy(buf + 3 + strlen(TEST_BLOB_NAME) + 6,
TEST_BLOB_DATA, TEST_BLOB_DATA_SIZE);
buf = e->start + TEST_NAMED_ARRAY_BUF_OFFSET;
*buf = AA_NAME;
*(buf + 1) = strlen(TEST_ARRAY_NAME) + 1;
strscpy(buf + 3, TEST_ARRAY_NAME, e->end - (void *)(buf + 3));
*(buf + 3 + strlen(TEST_ARRAY_NAME) + 1) = AA_ARRAY;
*((u16 *)(buf + 3 + strlen(TEST_ARRAY_NAME) + 2)) = TEST_ARRAY_SIZE;
return e;
}
static int policy_unpack_test_init(struct kunit *test)
{
size_t e_size = TEST_ARRAY_BUF_OFFSET + sizeof(u16) + 1;
struct policy_unpack_fixture *puf;
puf = kunit_kmalloc(test, sizeof(*puf), GFP_USER);
KUNIT_EXPECT_NOT_ERR_OR_NULL(test, puf);
puf->e_size = e_size;
puf->e = build_aa_ext_struct(puf, test, e_size);
test->priv = puf;
return 0;
}
static void policy_unpack_test_inbounds_when_inbounds(struct kunit *test)
{
struct policy_unpack_fixture *puf = test->priv;
KUNIT_EXPECT_TRUE(test, aa_inbounds(puf->e, 0));
KUNIT_EXPECT_TRUE(test, aa_inbounds(puf->e, puf->e_size / 2));
KUNIT_EXPECT_TRUE(test, aa_inbounds(puf->e, puf->e_size));
}
static void policy_unpack_test_inbounds_when_out_of_bounds(struct kunit *test)
{
struct policy_unpack_fixture *puf = test->priv;
KUNIT_EXPECT_FALSE(test, aa_inbounds(puf->e, puf->e_size + 1));
}
static void policy_unpack_test_unpack_array_with_null_name(struct kunit *test)
{
struct policy_unpack_fixture *puf = test->priv;
u16 array_size = 0;
puf->e->pos += TEST_ARRAY_BUF_OFFSET;
KUNIT_EXPECT_TRUE(test, aa_unpack_array(puf->e, NULL, &array_size));
KUNIT_EXPECT_EQ(test, array_size, (u16)TEST_ARRAY_SIZE);
KUNIT_EXPECT_PTR_EQ(test, puf->e->pos,
puf->e->start + TEST_ARRAY_BUF_OFFSET + sizeof(u16) + 1);
}
static void policy_unpack_test_unpack_array_with_name(struct kunit *test)
{
struct policy_unpack_fixture *puf = test->priv;
const char name[] = TEST_ARRAY_NAME;
u16 array_size = 0;
puf->e->pos += TEST_NAMED_ARRAY_BUF_OFFSET;
KUNIT_EXPECT_TRUE(test, aa_unpack_array(puf->e, name, &array_size));
KUNIT_EXPECT_EQ(test, array_size, (u16)TEST_ARRAY_SIZE);
KUNIT_EXPECT_PTR_EQ(test, puf->e->pos,
puf->e->start + TEST_ARRAY_BUF_OFFSET + sizeof(u16) + 1);
}
static void policy_unpack_test_unpack_array_out_of_bounds(struct kunit *test)
{
struct policy_unpack_fixture *puf = test->priv;
const char name[] = TEST_ARRAY_NAME;
u16 array_size;
puf->e->pos += TEST_NAMED_ARRAY_BUF_OFFSET;
puf->e->end = puf->e->start + TEST_ARRAY_BUF_OFFSET + sizeof(u16);
KUNIT_EXPECT_FALSE(test, aa_unpack_array(puf->e, name, &array_size));
KUNIT_EXPECT_PTR_EQ(test, puf->e->pos,
puf->e->start + TEST_NAMED_ARRAY_BUF_OFFSET);
}
static void policy_unpack_test_unpack_blob_with_null_name(struct kunit *test)
{
struct policy_unpack_fixture *puf = test->priv;
char *blob = NULL;
size_t size;
puf->e->pos += TEST_BLOB_BUF_OFFSET;
size = aa_unpack_blob(puf->e, &blob, NULL);
KUNIT_ASSERT_EQ(test, size, TEST_BLOB_DATA_SIZE);
KUNIT_EXPECT_TRUE(test,
memcmp(blob, TEST_BLOB_DATA, TEST_BLOB_DATA_SIZE) == 0);
}
static void policy_unpack_test_unpack_blob_with_name(struct kunit *test)
{
struct policy_unpack_fixture *puf = test->priv;
char *blob = NULL;
size_t size;
puf->e->pos += TEST_NAMED_BLOB_BUF_OFFSET;
size = aa_unpack_blob(puf->e, &blob, TEST_BLOB_NAME);
KUNIT_ASSERT_EQ(test, size, TEST_BLOB_DATA_SIZE);
KUNIT_EXPECT_TRUE(test,
memcmp(blob, TEST_BLOB_DATA, TEST_BLOB_DATA_SIZE) == 0);
}
static void policy_unpack_test_unpack_blob_out_of_bounds(struct kunit *test)
{
struct policy_unpack_fixture *puf = test->priv;
char *blob = NULL;
void *start;
int size;
puf->e->pos += TEST_NAMED_BLOB_BUF_OFFSET;
start = puf->e->pos;
puf->e->end = puf->e->start + TEST_BLOB_BUF_OFFSET
+ TEST_BLOB_DATA_SIZE - 1;
size = aa_unpack_blob(puf->e, &blob, TEST_BLOB_NAME);
KUNIT_EXPECT_EQ(test, size, 0);
KUNIT_EXPECT_PTR_EQ(test, puf->e->pos, start);
}
static void policy_unpack_test_unpack_str_with_null_name(struct kunit *test)
{
struct policy_unpack_fixture *puf = test->priv;
const char *string = NULL;
size_t size;
puf->e->pos += TEST_STRING_BUF_OFFSET;
size = aa_unpack_str(puf->e, &string, NULL);
KUNIT_EXPECT_EQ(test, size, strlen(TEST_STRING_DATA) + 1);
KUNIT_EXPECT_STREQ(test, string, TEST_STRING_DATA);
}
static void policy_unpack_test_unpack_str_with_name(struct kunit *test)
{
struct policy_unpack_fixture *puf = test->priv;
const char *string = NULL;
size_t size;
size = aa_unpack_str(puf->e, &string, TEST_STRING_NAME);
KUNIT_EXPECT_EQ(test, size, strlen(TEST_STRING_DATA) + 1);
KUNIT_EXPECT_STREQ(test, string, TEST_STRING_DATA);
}
static void policy_unpack_test_unpack_str_out_of_bounds(struct kunit *test)
{
struct policy_unpack_fixture *puf = test->priv;
const char *string = NULL;
void *start = puf->e->pos;
int size;
puf->e->end = puf->e->pos + TEST_STRING_BUF_OFFSET
+ strlen(TEST_STRING_DATA) - 1;
size = aa_unpack_str(puf->e, &string, TEST_STRING_NAME);
KUNIT_EXPECT_EQ(test, size, 0);
KUNIT_EXPECT_PTR_EQ(test, puf->e->pos, start);
}
static void policy_unpack_test_unpack_strdup_with_null_name(struct kunit *test)
{
struct policy_unpack_fixture *puf = test->priv;
char *string = NULL;
size_t size;
puf->e->pos += TEST_STRING_BUF_OFFSET;
size = aa_unpack_strdup(puf->e, &string, NULL);
KUNIT_EXPECT_EQ(test, size, strlen(TEST_STRING_DATA) + 1);
KUNIT_EXPECT_FALSE(test,
((uintptr_t)puf->e->start <= (uintptr_t)string)
&& ((uintptr_t)string <= (uintptr_t)puf->e->end));
KUNIT_EXPECT_STREQ(test, string, TEST_STRING_DATA);
}
static void policy_unpack_test_unpack_strdup_with_name(struct kunit *test)
{
struct policy_unpack_fixture *puf = test->priv;
char *string = NULL;
size_t size;
size = aa_unpack_strdup(puf->e, &string, TEST_STRING_NAME);
KUNIT_EXPECT_EQ(test, size, strlen(TEST_STRING_DATA) + 1);
KUNIT_EXPECT_FALSE(test,
((uintptr_t)puf->e->start <= (uintptr_t)string)
&& ((uintptr_t)string <= (uintptr_t)puf->e->end));
KUNIT_EXPECT_STREQ(test, string, TEST_STRING_DATA);
}
static void policy_unpack_test_unpack_strdup_out_of_bounds(struct kunit *test)
{
struct policy_unpack_fixture *puf = test->priv;
void *start = puf->e->pos;
char *string = NULL;
int size;
puf->e->end = puf->e->pos + TEST_STRING_BUF_OFFSET
+ strlen(TEST_STRING_DATA) - 1;
size = aa_unpack_strdup(puf->e, &string, TEST_STRING_NAME);
KUNIT_EXPECT_EQ(test, size, 0);
KUNIT_EXPECT_NULL(test, string);
KUNIT_EXPECT_PTR_EQ(test, puf->e->pos, start);
}
static void policy_unpack_test_unpack_nameX_with_null_name(struct kunit *test)
{
struct policy_unpack_fixture *puf = test->priv;
bool success;
puf->e->pos += TEST_U32_BUF_OFFSET;
success = aa_unpack_nameX(puf->e, AA_U32, NULL);
KUNIT_EXPECT_TRUE(test, success);
KUNIT_EXPECT_PTR_EQ(test, puf->e->pos,
puf->e->start + TEST_U32_BUF_OFFSET + 1);
}
static void policy_unpack_test_unpack_nameX_with_wrong_code(struct kunit *test)
{
struct policy_unpack_fixture *puf = test->priv;
bool success;
puf->e->pos += TEST_U32_BUF_OFFSET;
success = aa_unpack_nameX(puf->e, AA_BLOB, NULL);
KUNIT_EXPECT_FALSE(test, success);
KUNIT_EXPECT_PTR_EQ(test, puf->e->pos,
puf->e->start + TEST_U32_BUF_OFFSET);
}
static void policy_unpack_test_unpack_nameX_with_name(struct kunit *test)
{
struct policy_unpack_fixture *puf = test->priv;
const char name[] = TEST_U32_NAME;
bool success;
puf->e->pos += TEST_NAMED_U32_BUF_OFFSET;
success = aa_unpack_nameX(puf->e, AA_U32, name);
KUNIT_EXPECT_TRUE(test, success);
KUNIT_EXPECT_PTR_EQ(test, puf->e->pos,
puf->e->start + TEST_U32_BUF_OFFSET + 1);
}
static void policy_unpack_test_unpack_nameX_with_wrong_name(struct kunit *test)
{
struct policy_unpack_fixture *puf = test->priv;
static const char name[] = "12345678";
bool success;
puf->e->pos += TEST_NAMED_U32_BUF_OFFSET;
success = aa_unpack_nameX(puf->e, AA_U32, name);
KUNIT_EXPECT_FALSE(test, success);
KUNIT_EXPECT_PTR_EQ(test, puf->e->pos,
puf->e->start + TEST_NAMED_U32_BUF_OFFSET);
}
static void policy_unpack_test_unpack_u16_chunk_basic(struct kunit *test)
{
struct policy_unpack_fixture *puf = test->priv;
char *chunk = NULL;
size_t size;
puf->e->pos += TEST_U16_OFFSET;
/*
* WARNING: For unit testing purposes, we're pushing puf->e->end past
* the end of the allocated memory. Doing anything other than comparing
* memory addresses is dangerous.
*/
puf->e->end += TEST_U16_DATA;
size = aa_unpack_u16_chunk(puf->e, &chunk);
KUNIT_EXPECT_PTR_EQ(test, chunk,
puf->e->start + TEST_U16_OFFSET + 2);
KUNIT_EXPECT_EQ(test, size, TEST_U16_DATA);
KUNIT_EXPECT_PTR_EQ(test, puf->e->pos, (chunk + TEST_U16_DATA));
}
static void policy_unpack_test_unpack_u16_chunk_out_of_bounds_1(
struct kunit *test)
{
struct policy_unpack_fixture *puf = test->priv;
char *chunk = NULL;
size_t size;
puf->e->pos = puf->e->end - 1;
size = aa_unpack_u16_chunk(puf->e, &chunk);
KUNIT_EXPECT_EQ(test, size, 0);
KUNIT_EXPECT_NULL(test, chunk);
KUNIT_EXPECT_PTR_EQ(test, puf->e->pos, puf->e->end - 1);
}
static void policy_unpack_test_unpack_u16_chunk_out_of_bounds_2(
struct kunit *test)
{
struct policy_unpack_fixture *puf = test->priv;
char *chunk = NULL;
size_t size;
puf->e->pos += TEST_U16_OFFSET;
/*
* WARNING: For unit testing purposes, we're pushing puf->e->end past
* the end of the allocated memory. Doing anything other than comparing
* memory addresses is dangerous.
*/
puf->e->end = puf->e->pos + TEST_U16_DATA - 1;
size = aa_unpack_u16_chunk(puf->e, &chunk);
KUNIT_EXPECT_EQ(test, size, 0);
KUNIT_EXPECT_NULL(test, chunk);
KUNIT_EXPECT_PTR_EQ(test, puf->e->pos, puf->e->start + TEST_U16_OFFSET);
}
static void policy_unpack_test_unpack_u32_with_null_name(struct kunit *test)
{
struct policy_unpack_fixture *puf = test->priv;
bool success;
u32 data = 0;
puf->e->pos += TEST_U32_BUF_OFFSET;
success = aa_unpack_u32(puf->e, &data, NULL);
KUNIT_EXPECT_TRUE(test, success);
KUNIT_EXPECT_EQ(test, data, TEST_U32_DATA);
KUNIT_EXPECT_PTR_EQ(test, puf->e->pos,
puf->e->start + TEST_U32_BUF_OFFSET + sizeof(u32) + 1);
}
static void policy_unpack_test_unpack_u32_with_name(struct kunit *test)
{
struct policy_unpack_fixture *puf = test->priv;
const char name[] = TEST_U32_NAME;
bool success;
u32 data = 0;
puf->e->pos += TEST_NAMED_U32_BUF_OFFSET;
success = aa_unpack_u32(puf->e, &data, name);
KUNIT_EXPECT_TRUE(test, success);
KUNIT_EXPECT_EQ(test, data, TEST_U32_DATA);
KUNIT_EXPECT_PTR_EQ(test, puf->e->pos,
puf->e->start + TEST_U32_BUF_OFFSET + sizeof(u32) + 1);
}
static void policy_unpack_test_unpack_u32_out_of_bounds(struct kunit *test)
{
struct policy_unpack_fixture *puf = test->priv;
const char name[] = TEST_U32_NAME;
bool success;
u32 data = 0;
puf->e->pos += TEST_NAMED_U32_BUF_OFFSET;
puf->e->end = puf->e->start + TEST_U32_BUF_OFFSET + sizeof(u32);
success = aa_unpack_u32(puf->e, &data, name);
KUNIT_EXPECT_FALSE(test, success);
KUNIT_EXPECT_PTR_EQ(test, puf->e->pos,
puf->e->start + TEST_NAMED_U32_BUF_OFFSET);
}
static void policy_unpack_test_unpack_u64_with_null_name(struct kunit *test)
{
struct policy_unpack_fixture *puf = test->priv;
bool success;
u64 data = 0;
puf->e->pos += TEST_U64_BUF_OFFSET;
success = aa_unpack_u64(puf->e, &data, NULL);
KUNIT_EXPECT_TRUE(test, success);
KUNIT_EXPECT_EQ(test, data, TEST_U64_DATA);
KUNIT_EXPECT_PTR_EQ(test, puf->e->pos,
puf->e->start + TEST_U64_BUF_OFFSET + sizeof(u64) + 1);
}
static void policy_unpack_test_unpack_u64_with_name(struct kunit *test)
{
struct policy_unpack_fixture *puf = test->priv;
const char name[] = TEST_U64_NAME;
bool success;
u64 data = 0;
puf->e->pos += TEST_NAMED_U64_BUF_OFFSET;
success = aa_unpack_u64(puf->e, &data, name);
KUNIT_EXPECT_TRUE(test, success);
KUNIT_EXPECT_EQ(test, data, TEST_U64_DATA);
KUNIT_EXPECT_PTR_EQ(test, puf->e->pos,
puf->e->start + TEST_U64_BUF_OFFSET + sizeof(u64) + 1);
}
static void policy_unpack_test_unpack_u64_out_of_bounds(struct kunit *test)
{
struct policy_unpack_fixture *puf = test->priv;
const char name[] = TEST_U64_NAME;
bool success;
u64 data = 0;
puf->e->pos += TEST_NAMED_U64_BUF_OFFSET;
puf->e->end = puf->e->start + TEST_U64_BUF_OFFSET + sizeof(u64);
success = aa_unpack_u64(puf->e, &data, name);
KUNIT_EXPECT_FALSE(test, success);
KUNIT_EXPECT_PTR_EQ(test, puf->e->pos,
puf->e->start + TEST_NAMED_U64_BUF_OFFSET);
}
static void policy_unpack_test_unpack_X_code_match(struct kunit *test)
{
struct policy_unpack_fixture *puf = test->priv;
bool success = aa_unpack_X(puf->e, AA_NAME);
KUNIT_EXPECT_TRUE(test, success);
KUNIT_EXPECT_TRUE(test, puf->e->pos == puf->e->start + 1);
}
static void policy_unpack_test_unpack_X_code_mismatch(struct kunit *test)
{
struct policy_unpack_fixture *puf = test->priv;
bool success = aa_unpack_X(puf->e, AA_STRING);
KUNIT_EXPECT_FALSE(test, success);
KUNIT_EXPECT_TRUE(test, puf->e->pos == puf->e->start);
}
static void policy_unpack_test_unpack_X_out_of_bounds(struct kunit *test)
{
struct policy_unpack_fixture *puf = test->priv;
bool success;
puf->e->pos = puf->e->end;
success = aa_unpack_X(puf->e, AA_NAME);
KUNIT_EXPECT_FALSE(test, success);
}
static struct kunit_case apparmor_policy_unpack_test_cases[] = {
KUNIT_CASE(policy_unpack_test_inbounds_when_inbounds),
KUNIT_CASE(policy_unpack_test_inbounds_when_out_of_bounds),
KUNIT_CASE(policy_unpack_test_unpack_array_with_null_name),
KUNIT_CASE(policy_unpack_test_unpack_array_with_name),
KUNIT_CASE(policy_unpack_test_unpack_array_out_of_bounds),
KUNIT_CASE(policy_unpack_test_unpack_blob_with_null_name),
KUNIT_CASE(policy_unpack_test_unpack_blob_with_name),
KUNIT_CASE(policy_unpack_test_unpack_blob_out_of_bounds),
KUNIT_CASE(policy_unpack_test_unpack_nameX_with_null_name),
KUNIT_CASE(policy_unpack_test_unpack_nameX_with_wrong_code),
KUNIT_CASE(policy_unpack_test_unpack_nameX_with_name),
KUNIT_CASE(policy_unpack_test_unpack_nameX_with_wrong_name),
KUNIT_CASE(policy_unpack_test_unpack_str_with_null_name),
KUNIT_CASE(policy_unpack_test_unpack_str_with_name),
KUNIT_CASE(policy_unpack_test_unpack_str_out_of_bounds),
KUNIT_CASE(policy_unpack_test_unpack_strdup_with_null_name),
KUNIT_CASE(policy_unpack_test_unpack_strdup_with_name),
KUNIT_CASE(policy_unpack_test_unpack_strdup_out_of_bounds),
KUNIT_CASE(policy_unpack_test_unpack_u16_chunk_basic),
KUNIT_CASE(policy_unpack_test_unpack_u16_chunk_out_of_bounds_1),
KUNIT_CASE(policy_unpack_test_unpack_u16_chunk_out_of_bounds_2),
KUNIT_CASE(policy_unpack_test_unpack_u32_with_null_name),
KUNIT_CASE(policy_unpack_test_unpack_u32_with_name),
KUNIT_CASE(policy_unpack_test_unpack_u32_out_of_bounds),
KUNIT_CASE(policy_unpack_test_unpack_u64_with_null_name),
KUNIT_CASE(policy_unpack_test_unpack_u64_with_name),
KUNIT_CASE(policy_unpack_test_unpack_u64_out_of_bounds),
KUNIT_CASE(policy_unpack_test_unpack_X_code_match),
KUNIT_CASE(policy_unpack_test_unpack_X_code_mismatch),
KUNIT_CASE(policy_unpack_test_unpack_X_out_of_bounds),
{},
};
static struct kunit_suite apparmor_policy_unpack_test_module = {
.name = "apparmor_policy_unpack",
.init = policy_unpack_test_init,
.test_cases = apparmor_policy_unpack_test_cases,
};
kunit_test_suite(apparmor_policy_unpack_test_module);
MODULE_LICENSE("GPL");
| linux-master | security/apparmor/policy_unpack_test.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* AppArmor security module
*
* This file contains AppArmor functions for unpacking policy loaded
* from userspace.
*
* Copyright (C) 1998-2008 Novell/SUSE
* Copyright 2009-2022 Canonical Ltd.
*
* Code to provide backwards compatibility with older policy versions,
* by converting/mapping older policy formats into the newer internal
* formats.
*/
#include <linux/ctype.h>
#include <linux/errno.h>
#include "include/lib.h"
#include "include/policy_unpack.h"
#include "include/policy_compat.h"
/* remap old accept table embedded permissions to separate permission table */
static u32 dfa_map_xindex(u16 mask)
{
u16 old_index = (mask >> 10) & 0xf;
u32 index = 0;
if (mask & 0x100)
index |= AA_X_UNSAFE;
if (mask & 0x200)
index |= AA_X_INHERIT;
if (mask & 0x80)
index |= AA_X_UNCONFINED;
if (old_index == 1) {
index |= AA_X_UNCONFINED;
} else if (old_index == 2) {
index |= AA_X_NAME;
} else if (old_index == 3) {
index |= AA_X_NAME | AA_X_CHILD;
} else if (old_index) {
index |= AA_X_TABLE;
index |= old_index - 4;
}
return index;
}
/*
* map old dfa inline permissions to new format
*/
#define dfa_user_allow(dfa, state) (((ACCEPT_TABLE(dfa)[state]) & 0x7f) | \
((ACCEPT_TABLE(dfa)[state]) & 0x80000000))
#define dfa_user_xbits(dfa, state) (((ACCEPT_TABLE(dfa)[state]) >> 7) & 0x7f)
#define dfa_user_audit(dfa, state) ((ACCEPT_TABLE2(dfa)[state]) & 0x7f)
#define dfa_user_quiet(dfa, state) (((ACCEPT_TABLE2(dfa)[state]) >> 7) & 0x7f)
#define dfa_user_xindex(dfa, state) \
(dfa_map_xindex(ACCEPT_TABLE(dfa)[state] & 0x3fff))
#define dfa_other_allow(dfa, state) ((((ACCEPT_TABLE(dfa)[state]) >> 14) & \
0x7f) | \
((ACCEPT_TABLE(dfa)[state]) & 0x80000000))
#define dfa_other_xbits(dfa, state) \
((((ACCEPT_TABLE(dfa)[state]) >> 7) >> 14) & 0x7f)
#define dfa_other_audit(dfa, state) (((ACCEPT_TABLE2(dfa)[state]) >> 14) & 0x7f)
#define dfa_other_quiet(dfa, state) \
((((ACCEPT_TABLE2(dfa)[state]) >> 7) >> 14) & 0x7f)
#define dfa_other_xindex(dfa, state) \
dfa_map_xindex((ACCEPT_TABLE(dfa)[state] >> 14) & 0x3fff)
/**
* map_old_perms - map old file perms layout to the new layout
* @old: permission set in old mapping
*
* Returns: new permission mapping
*/
static u32 map_old_perms(u32 old)
{
u32 new = old & 0xf;
if (old & MAY_READ)
new |= AA_MAY_GETATTR | AA_MAY_OPEN;
if (old & MAY_WRITE)
new |= AA_MAY_SETATTR | AA_MAY_CREATE | AA_MAY_DELETE |
AA_MAY_CHMOD | AA_MAY_CHOWN | AA_MAY_OPEN;
if (old & 0x10)
new |= AA_MAY_LINK;
	/* the old mapping's lock and link_subset flags were overlaid;
	 * which applied was determined by which part of a pair they were in
	 */
if (old & 0x20)
new |= AA_MAY_LOCK | AA_LINK_SUBSET;
if (old & 0x40) /* AA_EXEC_MMAP */
new |= AA_EXEC_MMAP;
return new;
}
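/* add permission bits that were implicit in the old encoding: getattr is
 * always allowed, and change_profile/onexec are carried in the high bits
 * of the accept entry
 */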
static void compute_fperms_allow(struct aa_perms *perms, struct aa_dfa *dfa,
aa_state_t state)
{
perms->allow |= AA_MAY_GETATTR;
/* change_profile wasn't determined by ownership in old mapping */
if (ACCEPT_TABLE(dfa)[state] & 0x80000000)
perms->allow |= AA_MAY_CHANGE_PROFILE;
if (ACCEPT_TABLE(dfa)[state] & 0x40000000)
perms->allow |= AA_MAY_ONEXEC;
}
static struct aa_perms compute_fperms_user(struct aa_dfa *dfa,
aa_state_t state)
{
struct aa_perms perms = { };
perms.allow = map_old_perms(dfa_user_allow(dfa, state));
perms.audit = map_old_perms(dfa_user_audit(dfa, state));
perms.quiet = map_old_perms(dfa_user_quiet(dfa, state));
perms.xindex = dfa_user_xindex(dfa, state);
compute_fperms_allow(&perms, dfa, state);
return perms;
}
static struct aa_perms compute_fperms_other(struct aa_dfa *dfa,
aa_state_t state)
{
struct aa_perms perms = { };
perms.allow = map_old_perms(dfa_other_allow(dfa, state));
perms.audit = map_old_perms(dfa_other_audit(dfa, state));
perms.quiet = map_old_perms(dfa_other_quiet(dfa, state));
perms.xindex = dfa_other_xindex(dfa, state);
compute_fperms_allow(&perms, dfa, state);
return perms;
}
/**
* compute_fperms - convert dfa compressed perms to internal perms and store
* them so they can be retrieved later.
 * @dfa: a dfa using fperms to remap to internal permissions
 * @size: Returns - the number of entries in the remapped perm table
*
* Returns: remapped perm table
*/
static struct aa_perms *compute_fperms(struct aa_dfa *dfa,
u32 *size)
{
aa_state_t state;
unsigned int state_count;
struct aa_perms *table;
AA_BUG(!dfa);
state_count = dfa->tables[YYTD_ID_BASE]->td_lolen;
/* DFAs are restricted from having a state_count of less than 2 */
table = kvcalloc(state_count * 2, sizeof(struct aa_perms), GFP_KERNEL);
if (!table)
return NULL;
*size = state_count * 2;
for (state = 0; state < state_count; state++) {
table[state * 2] = compute_fperms_user(dfa, state);
table[state * 2 + 1] = compute_fperms_other(dfa, state);
}
return table;
}
static struct aa_perms *compute_xmatch_perms(struct aa_dfa *xmatch,
u32 *size)
{
struct aa_perms *perms;
int state;
int state_count;
AA_BUG(!xmatch);
state_count = xmatch->tables[YYTD_ID_BASE]->td_lolen;
/* DFAs are restricted from having a state_count of less than 2 */
perms = kvcalloc(state_count, sizeof(struct aa_perms), GFP_KERNEL);
if (!perms)
return NULL;
*size = state_count;
/* zero init so skip the trap state (state == 0) */
for (state = 1; state < state_count; state++)
perms[state].allow = dfa_user_allow(xmatch, state);
return perms;
}
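/* map the old policydb "other" permission bits into their new bit positions */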
static u32 map_other(u32 x)
{
return ((x & 0x3) << 8) | /* SETATTR/GETATTR */
((x & 0x1c) << 18) | /* ACCEPT/BIND/LISTEN */
((x & 0x60) << 19); /* SETOPT/GETOPT */
}
static u32 map_xbits(u32 x)
{
return ((x & 0x1) << 7) |
((x & 0x7e) << 9);
}
static struct aa_perms compute_perms_entry(struct aa_dfa *dfa,
aa_state_t state,
u32 version)
{
struct aa_perms perms = { };
perms.allow = dfa_user_allow(dfa, state);
perms.audit = dfa_user_audit(dfa, state);
perms.quiet = dfa_user_quiet(dfa, state);
/*
	 * This mapping is convoluted due to history.
* v1-v4: only file perms, which are handled by compute_fperms
* v5: added policydb which dropped user conditional to gain new
* perm bits, but had to map around the xbits because the
* userspace compiler was still munging them.
* v9: adds using the xbits in policydb because the compiler now
* supports treating policydb permission bits different.
* Unfortunately there is no way to force auditing on the
* perms represented by the xbits
*/
perms.allow |= map_other(dfa_other_allow(dfa, state));
if (VERSION_LE(version, v8))
perms.allow |= AA_MAY_LOCK;
else
perms.allow |= map_xbits(dfa_user_xbits(dfa, state));
/*
* for v5-v9 perm mapping in the policydb, the other set is used
* to extend the general perm set
*/
perms.audit |= map_other(dfa_other_audit(dfa, state));
perms.quiet |= map_other(dfa_other_quiet(dfa, state));
if (VERSION_GT(version, v8))
perms.quiet |= map_xbits(dfa_other_xbits(dfa, state));
return perms;
}
static struct aa_perms *compute_perms(struct aa_dfa *dfa, u32 version,
u32 *size)
{
unsigned int state;
unsigned int state_count;
struct aa_perms *table;
AA_BUG(!dfa);
state_count = dfa->tables[YYTD_ID_BASE]->td_lolen;
/* DFAs are restricted from having a state_count of less than 2 */
table = kvcalloc(state_count, sizeof(struct aa_perms), GFP_KERNEL);
if (!table)
return NULL;
*size = state_count;
/* zero init so skip the trap state (state == 0) */
for (state = 1; state < state_count; state++)
table[state] = compute_perms_entry(dfa, state, version);
return table;
}
/**
* remap_dfa_accept - remap old dfa accept table to be an index
* @dfa: dfa to do the remapping on
* @factor: scaling factor for the index conversion.
*
* Used in conjunction with compute_Xperms, it converts old style perms
* that are encoded in the dfa accept tables to the new style where
* there is a permission table and the accept table is an index into
* the permission table.
*/
static void remap_dfa_accept(struct aa_dfa *dfa, unsigned int factor)
{
unsigned int state;
unsigned int state_count = dfa->tables[YYTD_ID_BASE]->td_lolen;
AA_BUG(!dfa);
for (state = 0; state < state_count; state++)
ACCEPT_TABLE(dfa)[state] = state * factor;
kvfree(dfa->tables[YYTD_ID_ACCEPT2]);
dfa->tables[YYTD_ID_ACCEPT2] = NULL;
}
/* TODO: merge different dfa mappings into single map_policy fn */
int aa_compat_map_xmatch(struct aa_policydb *policy)
{
policy->perms = compute_xmatch_perms(policy->dfa, &policy->size);
if (!policy->perms)
return -ENOMEM;
remap_dfa_accept(policy->dfa, 1);
return 0;
}
int aa_compat_map_policy(struct aa_policydb *policy, u32 version)
{
policy->perms = compute_perms(policy->dfa, version, &policy->size);
if (!policy->perms)
return -ENOMEM;
remap_dfa_accept(policy->dfa, 1);
return 0;
}
int aa_compat_map_file(struct aa_policydb *policy)
{
policy->perms = compute_fperms(policy->dfa, &policy->size);
if (!policy->perms)
return -ENOMEM;
remap_dfa_accept(policy->dfa, 2);
return 0;
}
| linux-master | security/apparmor/policy_compat.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* AppArmor security module
*
* This file contains AppArmor mediation of files
*
* Copyright (C) 1998-2008 Novell/SUSE
* Copyright 2009-2017 Canonical Ltd.
*/
#include <linux/fs.h>
#include <linux/mount.h>
#include <linux/namei.h>
#include <uapi/linux/mount.h>
#include "include/apparmor.h"
#include "include/audit.h"
#include "include/cred.h"
#include "include/domain.h"
#include "include/file.h"
#include "include/match.h"
#include "include/mount.h"
#include "include/path.h"
#include "include/policy.h"
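/* append a human readable rendering of the mount @flags to the audit buffer */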
static void audit_mnt_flags(struct audit_buffer *ab, unsigned long flags)
{
if (flags & MS_RDONLY)
audit_log_format(ab, "ro");
else
audit_log_format(ab, "rw");
if (flags & MS_NOSUID)
audit_log_format(ab, ", nosuid");
if (flags & MS_NODEV)
audit_log_format(ab, ", nodev");
if (flags & MS_NOEXEC)
audit_log_format(ab, ", noexec");
if (flags & MS_SYNCHRONOUS)
audit_log_format(ab, ", sync");
if (flags & MS_REMOUNT)
audit_log_format(ab, ", remount");
if (flags & MS_MANDLOCK)
audit_log_format(ab, ", mand");
if (flags & MS_DIRSYNC)
audit_log_format(ab, ", dirsync");
if (flags & MS_NOATIME)
audit_log_format(ab, ", noatime");
if (flags & MS_NODIRATIME)
audit_log_format(ab, ", nodiratime");
if (flags & MS_BIND)
audit_log_format(ab, flags & MS_REC ? ", rbind" : ", bind");
if (flags & MS_MOVE)
audit_log_format(ab, ", move");
if (flags & MS_SILENT)
audit_log_format(ab, ", silent");
if (flags & MS_POSIXACL)
audit_log_format(ab, ", acl");
if (flags & MS_UNBINDABLE)
audit_log_format(ab, flags & MS_REC ? ", runbindable" :
", unbindable");
if (flags & MS_PRIVATE)
audit_log_format(ab, flags & MS_REC ? ", rprivate" :
", private");
if (flags & MS_SLAVE)
audit_log_format(ab, flags & MS_REC ? ", rslave" :
", slave");
if (flags & MS_SHARED)
audit_log_format(ab, flags & MS_REC ? ", rshared" :
", shared");
if (flags & MS_RELATIME)
audit_log_format(ab, ", relatime");
if (flags & MS_I_VERSION)
audit_log_format(ab, ", iversion");
if (flags & MS_STRICTATIME)
audit_log_format(ab, ", strictatime");
if (flags & MS_NOUSER)
audit_log_format(ab, ", nouser");
}
/**
* audit_cb - call back for mount specific audit fields
* @ab: audit_buffer (NOT NULL)
* @va: audit struct to audit values of (NOT NULL)
*/
static void audit_cb(struct audit_buffer *ab, void *va)
{
struct common_audit_data *sa = va;
if (aad(sa)->mnt.type) {
audit_log_format(ab, " fstype=");
audit_log_untrustedstring(ab, aad(sa)->mnt.type);
}
if (aad(sa)->mnt.src_name) {
audit_log_format(ab, " srcname=");
audit_log_untrustedstring(ab, aad(sa)->mnt.src_name);
}
if (aad(sa)->mnt.trans) {
audit_log_format(ab, " trans=");
audit_log_untrustedstring(ab, aad(sa)->mnt.trans);
}
if (aad(sa)->mnt.flags) {
audit_log_format(ab, " flags=\"");
audit_mnt_flags(ab, aad(sa)->mnt.flags);
audit_log_format(ab, "\"");
}
if (aad(sa)->mnt.data) {
audit_log_format(ab, " options=");
audit_log_untrustedstring(ab, aad(sa)->mnt.data);
}
}
/**
* audit_mount - handle the auditing of mount operations
* @profile: the profile being enforced (NOT NULL)
* @op: operation being mediated (NOT NULL)
* @name: name of object being mediated (MAYBE NULL)
* @src_name: src_name of object being mediated (MAYBE_NULL)
* @type: type of filesystem (MAYBE_NULL)
* @trans: name of trans (MAYBE NULL)
* @flags: filesystem independent mount flags
 * @data: filesystem specific mount data (MAYBE NULL)
* @request: permissions requested
* @perms: the permissions computed for the request (NOT NULL)
* @info: extra information message (MAYBE NULL)
* @error: 0 if operation allowed else failure error code
*
* Returns: %0 or error on failure
*/
static int audit_mount(struct aa_profile *profile, const char *op,
const char *name, const char *src_name,
const char *type, const char *trans,
unsigned long flags, const void *data, u32 request,
struct aa_perms *perms, const char *info, int error)
{
int audit_type = AUDIT_APPARMOR_AUTO;
DEFINE_AUDIT_DATA(sa, LSM_AUDIT_DATA_NONE, AA_CLASS_MOUNT, op);
if (likely(!error)) {
u32 mask = perms->audit;
if (unlikely(AUDIT_MODE(profile) == AUDIT_ALL))
mask = 0xffff;
/* mask off perms that are not being force audited */
request &= mask;
if (likely(!request))
return 0;
audit_type = AUDIT_APPARMOR_AUDIT;
} else {
/* only report permissions that were denied */
request = request & ~perms->allow;
if (request & perms->kill)
audit_type = AUDIT_APPARMOR_KILL;
/* quiet known rejects, assumes quiet and kill do not overlap */
if ((request & perms->quiet) &&
AUDIT_MODE(profile) != AUDIT_NOQUIET &&
AUDIT_MODE(profile) != AUDIT_ALL)
request &= ~perms->quiet;
if (!request)
return error;
}
aad(&sa)->name = name;
aad(&sa)->mnt.src_name = src_name;
aad(&sa)->mnt.type = type;
aad(&sa)->mnt.trans = trans;
aad(&sa)->mnt.flags = flags;
if (data && (perms->audit & AA_AUDIT_DATA))
aad(&sa)->mnt.data = data;
aad(&sa)->info = info;
aad(&sa)->error = error;
return aa_audit(audit_type, profile, &sa, audit_cb);
}
/**
* match_mnt_flags - Do an ordered match on mount flags
* @dfa: dfa to match against
* @state: state to start in
* @flags: mount flags to match against
*
* Mount flags are encoded as an ordered match. This is done instead of
* checking against a simple bitmask, to allow for logical operations
* on the flags.
*
* Returns: next state after flags match
*/
static aa_state_t match_mnt_flags(struct aa_dfa *dfa, aa_state_t state,
unsigned long flags)
{
unsigned int i;
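	/*
	 * e.g. flags = MS_RDONLY|MS_NOSUID (bits 0 and 1) walks the dfa
	 * with input symbols 1 then 2, i.e. bit position + 1, in ascending
	 * bit order
	 */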
for (i = 0; i <= 31 ; ++i) {
if ((1 << i) & flags)
state = aa_dfa_next(dfa, state, i + 1);
}
return state;
}
static const char * const mnt_info_table[] = {
"match succeeded",
"failed mntpnt match",
"failed srcname match",
"failed type match",
"failed flags match",
"failed data match",
"failed perms check"
};
/*
 * Returns 0 on success, else the index into the mnt_info_table above
 * identifying which element of the match failed
*/
static int do_match_mnt(struct aa_policydb *policy, aa_state_t start,
const char *mntpnt, const char *devname,
const char *type, unsigned long flags,
void *data, bool binary, struct aa_perms *perms)
{
aa_state_t state;
AA_BUG(!policy);
AA_BUG(!policy->dfa);
AA_BUG(!policy->perms);
AA_BUG(!perms);
state = aa_dfa_match(policy->dfa, start, mntpnt);
state = aa_dfa_null_transition(policy->dfa, state);
if (!state)
return 1;
if (devname)
state = aa_dfa_match(policy->dfa, state, devname);
state = aa_dfa_null_transition(policy->dfa, state);
if (!state)
return 2;
if (type)
state = aa_dfa_match(policy->dfa, state, type);
state = aa_dfa_null_transition(policy->dfa, state);
if (!state)
return 3;
state = match_mnt_flags(policy->dfa, state, flags);
if (!state)
return 4;
*perms = *aa_lookup_perms(policy, state);
if (perms->allow & AA_MAY_MOUNT)
return 0;
	/* only match data if it is not binary and the dfa indicates that
	 * data matching is expected
	 */
if (data && !binary && (perms->allow & AA_MNT_CONT_MATCH)) {
state = aa_dfa_null_transition(policy->dfa, state);
if (!state)
return 4;
state = aa_dfa_match(policy->dfa, state, data);
if (!state)
return 5;
*perms = *aa_lookup_perms(policy, state);
if (perms->allow & AA_MAY_MOUNT)
return 0;
}
/* failed at perms check, don't confuse with flags match */
return 6;
}
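/* combine the profile's path lookup flags with whether @path is a directory */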
static int path_flags(struct aa_profile *profile, const struct path *path)
{
AA_BUG(!profile);
AA_BUG(!path);
return profile->path_flags |
(S_ISDIR(path->dentry->d_inode->i_mode) ? PATH_IS_DIR : 0);
}
/**
* match_mnt_path_str - handle path matching for mount
* @profile: the confining profile
* @mntpath: for the mntpnt (NOT NULL)
* @buffer: buffer to be used to lookup mntpath
* @devname: string for the devname/src_name (MAY BE NULL OR ERRPTR)
* @type: string for the dev type (MAYBE NULL)
* @flags: mount flags to match
* @data: fs mount data (MAYBE NULL)
* @binary: whether @data is binary
* @devinfo: error str if (IS_ERR(@devname))
*
* Returns: 0 on success else error
*/
static int match_mnt_path_str(struct aa_profile *profile,
const struct path *mntpath, char *buffer,
const char *devname, const char *type,
unsigned long flags, void *data, bool binary,
const char *devinfo)
{
struct aa_perms perms = { };
const char *mntpnt = NULL, *info = NULL;
struct aa_ruleset *rules = list_first_entry(&profile->rules,
typeof(*rules), list);
int pos, error;
AA_BUG(!profile);
AA_BUG(!mntpath);
AA_BUG(!buffer);
if (!RULE_MEDIATES(rules, AA_CLASS_MOUNT))
return 0;
error = aa_path_name(mntpath, path_flags(profile, mntpath), buffer,
&mntpnt, &info, profile->disconnected);
if (error)
goto audit;
if (IS_ERR(devname)) {
error = PTR_ERR(devname);
devname = NULL;
info = devinfo;
goto audit;
}
error = -EACCES;
pos = do_match_mnt(&rules->policy,
rules->policy.start[AA_CLASS_MOUNT],
mntpnt, devname, type, flags, data, binary, &perms);
if (pos) {
info = mnt_info_table[pos];
goto audit;
}
error = 0;
audit:
return audit_mount(profile, OP_MOUNT, mntpnt, devname, type, NULL,
flags, data, AA_MAY_MOUNT, &perms, info, error);
}
/**
* match_mnt - handle path matching for mount
* @profile: the confining profile
* @path: for the mntpnt (NOT NULL)
* @buffer: buffer to be used to lookup mntpath
* @devpath: path devname/src_name (MAYBE NULL)
* @devbuffer: buffer to be used to lookup devname/src_name
* @type: string for the dev type (MAYBE NULL)
* @flags: mount flags to match
* @data: fs mount data (MAYBE NULL)
* @binary: whether @data is binary
*
* Returns: 0 on success else error
*/
static int match_mnt(struct aa_profile *profile, const struct path *path,
char *buffer, const struct path *devpath, char *devbuffer,
const char *type, unsigned long flags, void *data,
bool binary)
{
const char *devname = NULL, *info = NULL;
struct aa_ruleset *rules = list_first_entry(&profile->rules,
typeof(*rules), list);
int error = -EACCES;
AA_BUG(!profile);
AA_BUG(devpath && !devbuffer);
if (!RULE_MEDIATES(rules, AA_CLASS_MOUNT))
return 0;
if (devpath) {
error = aa_path_name(devpath, path_flags(profile, devpath),
devbuffer, &devname, &info,
profile->disconnected);
if (error)
devname = ERR_PTR(error);
}
return match_mnt_path_str(profile, path, buffer, devname, type, flags,
data, binary, info);
}
int aa_remount(struct aa_label *label, const struct path *path,
unsigned long flags, void *data)
{
struct aa_profile *profile;
char *buffer = NULL;
bool binary;
int error;
AA_BUG(!label);
AA_BUG(!path);
binary = path->dentry->d_sb->s_type->fs_flags & FS_BINARY_MOUNTDATA;
buffer = aa_get_buffer(false);
if (!buffer)
return -ENOMEM;
error = fn_for_each_confined(label, profile,
match_mnt(profile, path, buffer, NULL, NULL, NULL,
flags, data, binary));
aa_put_buffer(buffer);
return error;
}
int aa_bind_mount(struct aa_label *label, const struct path *path,
const char *dev_name, unsigned long flags)
{
struct aa_profile *profile;
char *buffer = NULL, *old_buffer = NULL;
struct path old_path;
int error;
AA_BUG(!label);
AA_BUG(!path);
if (!dev_name || !*dev_name)
return -EINVAL;
flags &= MS_REC | MS_BIND;
error = kern_path(dev_name, LOOKUP_FOLLOW|LOOKUP_AUTOMOUNT, &old_path);
if (error)
return error;
buffer = aa_get_buffer(false);
old_buffer = aa_get_buffer(false);
error = -ENOMEM;
if (!buffer || !old_buffer)
goto out;
error = fn_for_each_confined(label, profile,
match_mnt(profile, path, buffer, &old_path, old_buffer,
NULL, flags, NULL, false));
out:
aa_put_buffer(buffer);
aa_put_buffer(old_buffer);
path_put(&old_path);
return error;
}
int aa_mount_change_type(struct aa_label *label, const struct path *path,
unsigned long flags)
{
struct aa_profile *profile;
char *buffer = NULL;
int error;
AA_BUG(!label);
AA_BUG(!path);
/* These are the flags allowed by do_change_type() */
flags &= (MS_REC | MS_SILENT | MS_SHARED | MS_PRIVATE | MS_SLAVE |
MS_UNBINDABLE);
buffer = aa_get_buffer(false);
if (!buffer)
return -ENOMEM;
error = fn_for_each_confined(label, profile,
match_mnt(profile, path, buffer, NULL, NULL, NULL,
flags, NULL, false));
aa_put_buffer(buffer);
return error;
}
int aa_move_mount(struct aa_label *label, const struct path *path,
const char *orig_name)
{
struct aa_profile *profile;
char *buffer = NULL, *old_buffer = NULL;
struct path old_path;
int error;
AA_BUG(!label);
AA_BUG(!path);
if (!orig_name || !*orig_name)
return -EINVAL;
error = kern_path(orig_name, LOOKUP_FOLLOW, &old_path);
if (error)
return error;
buffer = aa_get_buffer(false);
old_buffer = aa_get_buffer(false);
error = -ENOMEM;
if (!buffer || !old_buffer)
goto out;
error = fn_for_each_confined(label, profile,
match_mnt(profile, path, buffer, &old_path, old_buffer,
NULL, MS_MOVE, NULL, false));
out:
aa_put_buffer(buffer);
aa_put_buffer(old_buffer);
path_put(&old_path);
return error;
}
int aa_new_mount(struct aa_label *label, const char *dev_name,
const struct path *path, const char *type, unsigned long flags,
void *data)
{
struct aa_profile *profile;
char *buffer = NULL, *dev_buffer = NULL;
bool binary = true;
int error;
int requires_dev = 0;
struct path tmp_path, *dev_path = NULL;
AA_BUG(!label);
AA_BUG(!path);
if (type) {
struct file_system_type *fstype;
fstype = get_fs_type(type);
if (!fstype)
return -ENODEV;
binary = fstype->fs_flags & FS_BINARY_MOUNTDATA;
requires_dev = fstype->fs_flags & FS_REQUIRES_DEV;
put_filesystem(fstype);
if (requires_dev) {
if (!dev_name || !*dev_name)
return -ENOENT;
error = kern_path(dev_name, LOOKUP_FOLLOW, &tmp_path);
if (error)
return error;
dev_path = &tmp_path;
}
}
buffer = aa_get_buffer(false);
if (!buffer) {
error = -ENOMEM;
goto out;
}
if (dev_path) {
dev_buffer = aa_get_buffer(false);
if (!dev_buffer) {
error = -ENOMEM;
goto out;
}
error = fn_for_each_confined(label, profile,
match_mnt(profile, path, buffer, dev_path, dev_buffer,
type, flags, data, binary));
} else {
error = fn_for_each_confined(label, profile,
match_mnt_path_str(profile, path, buffer, dev_name,
type, flags, data, binary, NULL));
}
out:
aa_put_buffer(buffer);
aa_put_buffer(dev_buffer);
if (dev_path)
path_put(dev_path);
return error;
}
static int profile_umount(struct aa_profile *profile, const struct path *path,
char *buffer)
{
struct aa_ruleset *rules = list_first_entry(&profile->rules,
typeof(*rules), list);
struct aa_perms perms = { };
const char *name = NULL, *info = NULL;
aa_state_t state;
int error;
AA_BUG(!profile);
AA_BUG(!path);
if (!RULE_MEDIATES(rules, AA_CLASS_MOUNT))
return 0;
error = aa_path_name(path, path_flags(profile, path), buffer, &name,
&info, profile->disconnected);
if (error)
goto audit;
state = aa_dfa_match(rules->policy.dfa,
rules->policy.start[AA_CLASS_MOUNT],
name);
perms = *aa_lookup_perms(&rules->policy, state);
if (AA_MAY_UMOUNT & ~perms.allow)
error = -EACCES;
audit:
return audit_mount(profile, OP_UMOUNT, name, NULL, NULL, NULL, 0, NULL,
AA_MAY_UMOUNT, &perms, info, error);
}
int aa_umount(struct aa_label *label, struct vfsmount *mnt, int flags)
{
struct aa_profile *profile;
char *buffer = NULL;
int error;
struct path path = { .mnt = mnt, .dentry = mnt->mnt_root };
AA_BUG(!label);
AA_BUG(!mnt);
buffer = aa_get_buffer(false);
if (!buffer)
return -ENOMEM;
error = fn_for_each_confined(label, profile,
profile_umount(profile, &path, buffer));
aa_put_buffer(buffer);
return error;
}
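/*
 * Illustrative sketch added by the editor, not part of the original
 * kernel source: a minimal caller of aa_umount(). The real caller is
 * the LSM umount hook; example_umount_check() and its use of
 * aa_get_task_label()/aa_put_label() are assumptions made only so the
 * refcounting in the sketch is self-contained.
 */
#if 0	/* example only, never compiled */
static int example_umount_check(struct vfsmount *mnt, int flags)
{
	/* counted reference on the current task's label */
	struct aa_label *label = aa_get_task_label(current);
	/* ask AppArmor whether this label may umount @mnt */
	int error = aa_umount(label, mnt, flags);
	aa_put_label(label);
	return error;
}
#endif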
/* helper fn for transition on pivotroot
*
* Returns: label for transition or ERR_PTR. Does not return NULL
*/
static struct aa_label *build_pivotroot(struct aa_profile *profile,
const struct path *new_path,
char *new_buffer,
const struct path *old_path,
char *old_buffer)
{
struct aa_ruleset *rules = list_first_entry(&profile->rules,
typeof(*rules), list);
const char *old_name, *new_name = NULL, *info = NULL;
const char *trans_name = NULL;
struct aa_perms perms = { };
aa_state_t state;
int error;
AA_BUG(!profile);
AA_BUG(!new_path);
AA_BUG(!old_path);
if (profile_unconfined(profile) ||
!RULE_MEDIATES(rules, AA_CLASS_MOUNT))
return aa_get_newest_label(&profile->label);
error = aa_path_name(old_path, path_flags(profile, old_path),
old_buffer, &old_name, &info,
profile->disconnected);
if (error)
goto audit;
error = aa_path_name(new_path, path_flags(profile, new_path),
new_buffer, &new_name, &info,
profile->disconnected);
if (error)
goto audit;
error = -EACCES;
state = aa_dfa_match(rules->policy.dfa,
rules->policy.start[AA_CLASS_MOUNT],
new_name);
state = aa_dfa_null_transition(rules->policy.dfa, state);
state = aa_dfa_match(rules->policy.dfa, state, old_name);
perms = *aa_lookup_perms(&rules->policy, state);
if (AA_MAY_PIVOTROOT & perms.allow)
error = 0;
audit:
error = audit_mount(profile, OP_PIVOTROOT, new_name, old_name,
NULL, trans_name, 0, NULL, AA_MAY_PIVOTROOT,
&perms, info, error);
if (error)
return ERR_PTR(error);
return aa_get_newest_label(&profile->label);
}
int aa_pivotroot(struct aa_label *label, const struct path *old_path,
const struct path *new_path)
{
struct aa_profile *profile;
struct aa_label *target = NULL;
char *old_buffer = NULL, *new_buffer = NULL, *info = NULL;
int error;
AA_BUG(!label);
AA_BUG(!old_path);
AA_BUG(!new_path);
old_buffer = aa_get_buffer(false);
new_buffer = aa_get_buffer(false);
error = -ENOMEM;
if (!old_buffer || !new_buffer)
goto out;
target = fn_label_build(label, profile, GFP_KERNEL,
build_pivotroot(profile, new_path, new_buffer,
old_path, old_buffer));
if (!target) {
info = "label build failed";
error = -ENOMEM;
goto fail;
} else if (!IS_ERR(target)) {
error = aa_replace_current_label(target);
if (error) {
/* TODO: audit target */
aa_put_label(target);
goto out;
}
aa_put_label(target);
} else
/* already audited error */
error = PTR_ERR(target);
out:
aa_put_buffer(old_buffer);
aa_put_buffer(new_buffer);
return error;
fail:
/* TODO: add back in auditing of new_name and old_name */
error = fn_for_each(label, profile,
audit_mount(profile, OP_PIVOTROOT, NULL /*new_name */,
NULL /* old_name */,
NULL, NULL,
0, NULL, AA_MAY_PIVOTROOT, &nullperms, info,
error));
goto out;
}
| linux-master | security/apparmor/mount.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* AppArmor security module
*
* This file contains AppArmor task related definitions and mediation
*
* Copyright 2017 Canonical Ltd.
*
* TODO
* If a task uses change_hat it currently does not return to the old
* cred or task context but instead creates a new one. Ideally the task
* should return to the previous cred if it has not been modified.
*/
#include <linux/gfp.h>
#include <linux/ptrace.h>
#include "include/audit.h"
#include "include/cred.h"
#include "include/policy.h"
#include "include/task.h"
/**
* aa_get_task_label - Get another task's label
* @task: task to query (NOT NULL)
*
* Returns: counted reference to @task's label
*/
struct aa_label *aa_get_task_label(struct task_struct *task)
{
struct aa_label *p;
rcu_read_lock();
p = aa_get_newest_cred_label(__task_cred(task));
rcu_read_unlock();
return p;
}
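/*
 * Illustrative sketch added by the editor, not part of the original
 * source: the usual get/use/put pairing for aa_get_task_label(). The
 * wrapper name example_task_is_confined() is hypothetical; only the
 * reference handling is the point.
 */
#if 0	/* example only, never compiled */
static bool example_task_is_confined(struct task_struct *task)
{
	struct aa_label *label = aa_get_task_label(task);
	bool confined = !unconfined(label);
	/* the counted reference must always be dropped */
	aa_put_label(label);
	return confined;
}
#endif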
/**
 * aa_replace_current_label - replace the current task's label
* @label: new label (NOT NULL)
*
* Returns: 0 or error on failure
*/
int aa_replace_current_label(struct aa_label *label)
{
struct aa_label *old = aa_current_raw_label();
struct aa_task_ctx *ctx = task_ctx(current);
struct cred *new;
AA_BUG(!label);
if (old == label)
return 0;
if (current_cred() != current_real_cred())
return -EBUSY;
new = prepare_creds();
if (!new)
return -ENOMEM;
if (ctx->nnp && label_is_stale(ctx->nnp)) {
struct aa_label *tmp = ctx->nnp;
ctx->nnp = aa_get_newest_label(tmp);
aa_put_label(tmp);
}
if (unconfined(label) || (labels_ns(old) != labels_ns(label)))
/*
* if switching to unconfined or a different label namespace
* clear out context state
*/
aa_clear_task_ctx_trans(task_ctx(current));
/*
* be careful switching cred label, when racing replacement it
	 * is possible that the cred label's->proxy->label is the reference
* keeping @label valid, so make sure to get its reference before
* dropping the reference on the cred's label
*/
aa_get_label(label);
aa_put_label(cred_label(new));
set_cred_label(new, label);
commit_creds(new);
return 0;
}
/**
 * aa_set_current_onexec - set the task's change_profile to happen onexec
* @label: system label to set at exec (MAYBE NULL to clear value)
* @stack: whether stacking should be done
* Returns: 0 or error on failure
*/
int aa_set_current_onexec(struct aa_label *label, bool stack)
{
struct aa_task_ctx *ctx = task_ctx(current);
aa_get_label(label);
aa_put_label(ctx->onexec);
ctx->onexec = label;
ctx->token = stack;
return 0;
}
/**
 * aa_set_current_hat - set the current task's hat
* @label: label to set as the current hat (NOT NULL)
* @token: token value that must be specified to change from the hat
*
 * Do switch of task's hat. If the task is currently in a hat,
 * validate the token to match.
*
* Returns: 0 or error on failure
*/
int aa_set_current_hat(struct aa_label *label, u64 token)
{
struct aa_task_ctx *ctx = task_ctx(current);
struct cred *new;
new = prepare_creds();
if (!new)
return -ENOMEM;
AA_BUG(!label);
if (!ctx->previous) {
/* transfer refcount */
ctx->previous = cred_label(new);
ctx->token = token;
} else if (ctx->token == token) {
aa_put_label(cred_label(new));
} else {
/* previous_profile && ctx->token != token */
abort_creds(new);
return -EACCES;
}
set_cred_label(new, aa_get_newest_label(label));
/* clear exec on switching context */
aa_put_label(ctx->onexec);
ctx->onexec = NULL;
commit_creds(new);
return 0;
}
/**
* aa_restore_previous_label - exit from hat context restoring previous label
* @token: the token that must be matched to exit hat context
*
* Attempt to return out of a hat to the previous label. The token
* must match the stored token value.
*
 * Returns: 0 or error on failure
*/
int aa_restore_previous_label(u64 token)
{
struct aa_task_ctx *ctx = task_ctx(current);
struct cred *new;
if (ctx->token != token)
return -EACCES;
/* ignore restores when there is no saved label */
if (!ctx->previous)
return 0;
new = prepare_creds();
if (!new)
return -ENOMEM;
aa_put_label(cred_label(new));
set_cred_label(new, aa_get_newest_label(ctx->previous));
AA_BUG(!cred_label(new));
/* clear exec && prev information when restoring to previous context */
aa_clear_task_ctx_trans(ctx);
commit_creds(new);
return 0;
}
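/*
 * Illustrative sketch added by the editor, not part of the original
 * source: how aa_set_current_hat() pairs with
 * aa_restore_previous_label(). The same token used to enter the hat
 * must be presented to leave it. example_run_in_hat() and the use of
 * aa_label_parse() to look the hat up are assumptions; the real
 * change_hat path goes through the domain transition code.
 */
#if 0	/* example only, never compiled */
static int example_run_in_hat(const char *hat_name, u64 token)
{
	struct aa_label *hat;
	int error;
	hat = aa_label_parse(aa_current_raw_label(), hat_name,
			     GFP_KERNEL, false, false);
	if (IS_ERR(hat))
		return PTR_ERR(hat);
	error = aa_set_current_hat(hat, token);
	aa_put_label(hat);	/* aa_set_current_hat took its own ref */
	if (error)
		return error;
	/* ... do the work that should be confined by the hat ... */
	return aa_restore_previous_label(token);
}
#endif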
/**
* audit_ptrace_mask - convert mask to permission string
* @mask: permission mask to convert
*
* Returns: pointer to static string
*/
static const char *audit_ptrace_mask(u32 mask)
{
switch (mask) {
case MAY_READ:
return "read";
case MAY_WRITE:
return "trace";
case AA_MAY_BE_READ:
return "readby";
case AA_MAY_BE_TRACED:
return "tracedby";
}
return "";
}
/* call back to audit ptrace fields */
static void audit_ptrace_cb(struct audit_buffer *ab, void *va)
{
struct common_audit_data *sa = va;
if (aad(sa)->request & AA_PTRACE_PERM_MASK) {
audit_log_format(ab, " requested_mask=\"%s\"",
audit_ptrace_mask(aad(sa)->request));
if (aad(sa)->denied & AA_PTRACE_PERM_MASK) {
audit_log_format(ab, " denied_mask=\"%s\"",
audit_ptrace_mask(aad(sa)->denied));
}
}
audit_log_format(ab, " peer=");
aa_label_xaudit(ab, labels_ns(aad(sa)->label), aad(sa)->peer,
FLAGS_NONE, GFP_ATOMIC);
}
/* assumes check for RULE_MEDIATES is already done */
/* TODO: conditionals */
static int profile_ptrace_perm(struct aa_profile *profile,
struct aa_label *peer, u32 request,
struct common_audit_data *sa)
{
struct aa_ruleset *rules = list_first_entry(&profile->rules,
typeof(*rules), list);
struct aa_perms perms = { };
aad(sa)->peer = peer;
aa_profile_match_label(profile, rules, peer, AA_CLASS_PTRACE, request,
&perms);
aa_apply_modes_to_perms(profile, &perms);
return aa_check_perms(profile, &perms, request, sa, audit_ptrace_cb);
}
static int profile_tracee_perm(struct aa_profile *tracee,
struct aa_label *tracer, u32 request,
struct common_audit_data *sa)
{
if (profile_unconfined(tracee) || unconfined(tracer) ||
!ANY_RULE_MEDIATES(&tracee->rules, AA_CLASS_PTRACE))
return 0;
return profile_ptrace_perm(tracee, tracer, request, sa);
}
static int profile_tracer_perm(struct aa_profile *tracer,
struct aa_label *tracee, u32 request,
struct common_audit_data *sa)
{
if (profile_unconfined(tracer))
return 0;
if (ANY_RULE_MEDIATES(&tracer->rules, AA_CLASS_PTRACE))
return profile_ptrace_perm(tracer, tracee, request, sa);
/* profile uses the old style capability check for ptrace */
if (&tracer->label == tracee)
return 0;
aad(sa)->label = &tracer->label;
aad(sa)->peer = tracee;
aad(sa)->request = 0;
aad(sa)->error = aa_capable(&tracer->label, CAP_SYS_PTRACE,
CAP_OPT_NONE);
return aa_audit(AUDIT_APPARMOR_AUTO, tracer, sa, audit_ptrace_cb);
}
/**
* aa_may_ptrace - test if tracer task can trace the tracee
* @tracer: label of the task doing the tracing (NOT NULL)
* @tracee: task label to be traced
* @request: permission request
*
* Returns: %0 else error code if permission denied or error
*/
int aa_may_ptrace(struct aa_label *tracer, struct aa_label *tracee,
u32 request)
{
struct aa_profile *profile;
u32 xrequest = request << PTRACE_PERM_SHIFT;
DEFINE_AUDIT_DATA(sa, LSM_AUDIT_DATA_NONE, AA_CLASS_PTRACE, OP_PTRACE);
return xcheck_labels(tracer, tracee, profile,
profile_tracer_perm(profile, tracee, request, &sa),
profile_tracee_perm(profile, tracer, xrequest, &sa));
}
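/*
 * Illustrative sketch added by the editor, not part of the original
 * source: checking whether one task may trace another. MAY_WRITE is
 * the "trace" request and MAY_READ the read-only "read" request (see
 * audit_ptrace_mask() above). example_can_trace() is hypothetical.
 */
#if 0	/* example only, never compiled */
static int example_can_trace(struct task_struct *tracer_task,
			     struct task_struct *tracee_task)
{
	struct aa_label *tracer = aa_get_task_label(tracer_task);
	struct aa_label *tracee = aa_get_task_label(tracee_task);
	int error = aa_may_ptrace(tracer, tracee, MAY_WRITE);
	aa_put_label(tracee);
	aa_put_label(tracer);
	return error;
}
#endif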
| linux-master | security/apparmor/task.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* AppArmor security module
*
* This file contains AppArmor label definitions
*
* Copyright 2017 Canonical Ltd.
*/
#include <linux/audit.h>
#include <linux/seq_file.h>
#include <linux/sort.h>
#include "include/apparmor.h"
#include "include/cred.h"
#include "include/label.h"
#include "include/policy.h"
#include "include/secid.h"
/*
* the aa_label represents the set of profiles confining an object
*
* Labels maintain a reference count to the set of pointers they reference
* Labels are ref counted by
* tasks and object via the security field/security context off the field
* code - will take a ref count on a label if it needs the label
* beyond what is possible with an rcu_read_lock.
* profiles - each profile is a label
* secids - a pinned secid will keep a refcount of the label it is
* referencing
* objects - inode, files, sockets, ...
*
 * Labels are not ref counted by the label set, so they may be removed and
* freed when no longer in use.
*
*/
#define PROXY_POISON 97
#define LABEL_POISON 100
static void free_proxy(struct aa_proxy *proxy)
{
if (proxy) {
		/* p->label will not be updated any more as p is dead */
aa_put_label(rcu_dereference_protected(proxy->label, true));
memset(proxy, 0, sizeof(*proxy));
RCU_INIT_POINTER(proxy->label, (struct aa_label *)PROXY_POISON);
kfree(proxy);
}
}
void aa_proxy_kref(struct kref *kref)
{
struct aa_proxy *proxy = container_of(kref, struct aa_proxy, count);
free_proxy(proxy);
}
struct aa_proxy *aa_alloc_proxy(struct aa_label *label, gfp_t gfp)
{
struct aa_proxy *new;
new = kzalloc(sizeof(struct aa_proxy), gfp);
if (new) {
kref_init(&new->count);
rcu_assign_pointer(new->label, aa_get_label(label));
}
return new;
}
/* requires profile list write lock held */
void __aa_proxy_redirect(struct aa_label *orig, struct aa_label *new)
{
struct aa_label *tmp;
AA_BUG(!orig);
AA_BUG(!new);
lockdep_assert_held_write(&labels_set(orig)->lock);
tmp = rcu_dereference_protected(orig->proxy->label,
&labels_ns(orig)->lock);
rcu_assign_pointer(orig->proxy->label, aa_get_label(new));
orig->flags |= FLAG_STALE;
aa_put_label(tmp);
}
static void __proxy_share(struct aa_label *old, struct aa_label *new)
{
struct aa_proxy *proxy = new->proxy;
new->proxy = aa_get_proxy(old->proxy);
__aa_proxy_redirect(old, new);
aa_put_proxy(proxy);
}
/**
* ns_cmp - compare ns for label set ordering
* @a: ns to compare (NOT NULL)
* @b: ns to compare (NOT NULL)
*
* Returns: <0 if a < b
* ==0 if a == b
* >0 if a > b
*/
static int ns_cmp(struct aa_ns *a, struct aa_ns *b)
{
int res;
AA_BUG(!a);
AA_BUG(!b);
AA_BUG(!a->base.hname);
AA_BUG(!b->base.hname);
if (a == b)
return 0;
res = a->level - b->level;
if (res)
return res;
return strcmp(a->base.hname, b->base.hname);
}
/**
* profile_cmp - profile comparison for set ordering
* @a: profile to compare (NOT NULL)
* @b: profile to compare (NOT NULL)
*
* Returns: <0 if a < b
* ==0 if a == b
* >0 if a > b
*/
static int profile_cmp(struct aa_profile *a, struct aa_profile *b)
{
int res;
AA_BUG(!a);
AA_BUG(!b);
AA_BUG(!a->ns);
AA_BUG(!b->ns);
AA_BUG(!a->base.hname);
AA_BUG(!b->base.hname);
if (a == b || a->base.hname == b->base.hname)
return 0;
res = ns_cmp(a->ns, b->ns);
if (res)
return res;
return strcmp(a->base.hname, b->base.hname);
}
/**
 * vec_cmp - profile vector comparison for set ordering
 * @a: vector of profiles to compare (NOT NULL)
 * @an: length of @a
 * @b: vector of profiles to compare (NOT NULL)
 * @bn: length of @b
 *
 * Returns: <0 if @a < @b
 * ==0 if @a == @b
 * >0 if @a > @b
*/
static int vec_cmp(struct aa_profile **a, int an, struct aa_profile **b, int bn)
{
int i;
AA_BUG(!a);
AA_BUG(!*a);
AA_BUG(!b);
AA_BUG(!*b);
AA_BUG(an <= 0);
AA_BUG(bn <= 0);
for (i = 0; i < an && i < bn; i++) {
int res = profile_cmp(a[i], b[i]);
if (res != 0)
return res;
}
return an - bn;
}
static bool vec_is_stale(struct aa_profile **vec, int n)
{
int i;
AA_BUG(!vec);
for (i = 0; i < n; i++) {
if (profile_is_stale(vec[i]))
return true;
}
return false;
}
static long accum_vec_flags(struct aa_profile **vec, int n)
{
long u = FLAG_UNCONFINED;
int i;
AA_BUG(!vec);
for (i = 0; i < n; i++) {
u |= vec[i]->label.flags & (FLAG_DEBUG1 | FLAG_DEBUG2 |
FLAG_STALE);
if (!(u & vec[i]->label.flags & FLAG_UNCONFINED))
u &= ~FLAG_UNCONFINED;
}
return u;
}
static int sort_cmp(const void *a, const void *b)
{
return profile_cmp(*(struct aa_profile **)a, *(struct aa_profile **)b);
}
/*
* assumes vec is sorted
* Assumes @vec has null terminator at vec[n], and will null terminate
* vec[n - dups]
*/
static inline int unique(struct aa_profile **vec, int n)
{
int i, pos, dups = 0;
AA_BUG(n < 1);
AA_BUG(!vec);
pos = 0;
for (i = 1; i < n; i++) {
int res = profile_cmp(vec[pos], vec[i]);
AA_BUG(res > 0, "vec not sorted");
if (res == 0) {
/* drop duplicate */
aa_put_profile(vec[i]);
dups++;
continue;
}
pos++;
if (dups)
vec[pos] = vec[i];
}
AA_BUG(dups < 0);
return dups;
}
/**
* aa_vec_unique - canonical sort and unique a list of profiles
 * @vec: list of profiles to sort and merge
 * @n: number of refcounted profiles in the list (@n > 0)
 * @flags: VEC_FLAG_TERMINATE if @vec has a null terminator at vec[n]
*
* Returns: the number of duplicates eliminated == references put
*
* If @flags & VEC_FLAG_TERMINATE @vec has null terminator at vec[n], and will
* null terminate vec[n - dups]
*/
int aa_vec_unique(struct aa_profile **vec, int n, int flags)
{
int i, dups = 0;
AA_BUG(n < 1);
AA_BUG(!vec);
	/* vecs are usually small and in order, have a fallback for larger */
if (n > 8) {
sort(vec, n, sizeof(struct aa_profile *), sort_cmp, NULL);
dups = unique(vec, n);
goto out;
}
/* insertion sort + unique in one */
for (i = 1; i < n; i++) {
struct aa_profile *tmp = vec[i];
int pos, j;
for (pos = i - 1 - dups; pos >= 0; pos--) {
int res = profile_cmp(vec[pos], tmp);
if (res == 0) {
/* drop duplicate entry */
aa_put_profile(tmp);
dups++;
goto continue_outer;
} else if (res < 0)
break;
}
/* pos is at entry < tmp, or index -1. Set to insert pos */
pos++;
for (j = i - dups; j > pos; j--)
vec[j] = vec[j - 1];
vec[pos] = tmp;
continue_outer:
;
}
AA_BUG(dups < 0);
out:
if (flags & VEC_FLAG_TERMINATE)
vec[n - dups] = NULL;
return dups;
}
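/*
 * Illustrative sketch added by the editor, not part of the original
 * source: canonicalising a small profile vector with aa_vec_unique().
 * The vector carries counted profile references plus one extra slot for
 * the null terminator required by VEC_FLAG_TERMINATE; duplicates that
 * are dropped have their references put by aa_vec_unique() itself. The
 * profiles @p1 and @p2 are assumed to be valid refcounted profiles.
 */
#if 0	/* example only, never compiled */
static void example_unique_vec(struct aa_profile *p1, struct aa_profile *p2)
{
	struct aa_profile *vec[3];	/* 2 entries + null terminator */
	int n = 2;
	vec[0] = aa_get_profile(p1);
	vec[1] = aa_get_profile(p2);
	vec[2] = NULL;
	/* sort, drop duplicates (putting their refs), re-terminate */
	n -= aa_vec_unique(vec, n, VEC_FLAG_TERMINATE);
	/* vec[0..n-1] is now sorted and unique, e.g. for label creation */
	while (n--)
		aa_put_profile(vec[n]);
}
#endif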
void aa_label_destroy(struct aa_label *label)
{
AA_BUG(!label);
if (!label_isprofile(label)) {
struct aa_profile *profile;
struct label_it i;
aa_put_str(label->hname);
label_for_each(i, label, profile) {
aa_put_profile(profile);
label->vec[i.i] = (struct aa_profile *)
(LABEL_POISON + (long) i.i);
}
}
if (label->proxy) {
if (rcu_dereference_protected(label->proxy->label, true) == label)
rcu_assign_pointer(label->proxy->label, NULL);
aa_put_proxy(label->proxy);
}
aa_free_secid(label->secid);
label->proxy = (struct aa_proxy *) PROXY_POISON + 1;
}
void aa_label_free(struct aa_label *label)
{
if (!label)
return;
aa_label_destroy(label);
kfree(label);
}
static void label_free_switch(struct aa_label *label)
{
if (label->flags & FLAG_NS_COUNT)
aa_free_ns(labels_ns(label));
else if (label_isprofile(label))
aa_free_profile(labels_profile(label));
else
aa_label_free(label);
}
static void label_free_rcu(struct rcu_head *head)
{
struct aa_label *label = container_of(head, struct aa_label, rcu);
if (label->flags & FLAG_IN_TREE)
(void) aa_label_remove(label);
label_free_switch(label);
}
void aa_label_kref(struct kref *kref)
{
struct aa_label *label = container_of(kref, struct aa_label, count);
struct aa_ns *ns = labels_ns(label);
if (!ns) {
/* never live, no rcu callback needed, just using the fn */
label_free_switch(label);
return;
}
/* TODO: update labels_profile macro so it works here */
AA_BUG(label_isprofile(label) &&
on_list_rcu(&label->vec[0]->base.profiles));
AA_BUG(label_isprofile(label) &&
on_list_rcu(&label->vec[0]->base.list));
/* TODO: if compound label and not stale add to reclaim cache */
call_rcu(&label->rcu, label_free_rcu);
}
static void label_free_or_put_new(struct aa_label *label, struct aa_label *new)
{
if (label != new)
/* need to free directly to break circular ref with proxy */
aa_label_free(new);
else
aa_put_label(new);
}
bool aa_label_init(struct aa_label *label, int size, gfp_t gfp)
{
AA_BUG(!label);
AA_BUG(size < 1);
if (aa_alloc_secid(label, gfp) < 0)
return false;
label->size = size; /* doesn't include null */
label->vec[size] = NULL; /* null terminate */
kref_init(&label->count);
RB_CLEAR_NODE(&label->node);
return true;
}
/**
* aa_label_alloc - allocate a label with a profile vector of @size length
* @size: size of profile vector in the label
* @proxy: proxy to use OR null if to allocate a new one
* @gfp: memory allocation type
*
* Returns: new label
* else NULL if failed
*/
struct aa_label *aa_label_alloc(int size, struct aa_proxy *proxy, gfp_t gfp)
{
struct aa_label *new;
AA_BUG(size < 1);
/* + 1 for null terminator entry on vec */
new = kzalloc(struct_size(new, vec, size + 1), gfp);
AA_DEBUG("%s (%p)\n", __func__, new);
if (!new)
goto fail;
if (!aa_label_init(new, size, gfp))
goto fail;
if (!proxy) {
proxy = aa_alloc_proxy(new, gfp);
if (!proxy)
goto fail;
} else
aa_get_proxy(proxy);
	/* just set new's proxy, don't redirect proxy here if it was passed in */
new->proxy = proxy;
return new;
fail:
kfree(new);
return NULL;
}
/**
* label_cmp - label comparison for set ordering
* @a: label to compare (NOT NULL)
* @b: label to compare (NOT NULL)
*
* Returns: <0 if a < b
* ==0 if a == b
* >0 if a > b
*/
static int label_cmp(struct aa_label *a, struct aa_label *b)
{
AA_BUG(!b);
if (a == b)
return 0;
return vec_cmp(a->vec, a->size, b->vec, b->size);
}
/* helper fn for label_for_each_confined */
int aa_label_next_confined(struct aa_label *label, int i)
{
AA_BUG(!label);
AA_BUG(i < 0);
for (; i < label->size; i++) {
if (!profile_unconfined(label->vec[i]))
return i;
}
return i;
}
/**
* __aa_label_next_not_in_set - return the next profile of @sub not in @set
* @I: label iterator
* @set: label to test against
 * @sub: label to test if it is a subset of @set
*
* Returns: profile in @sub that is not in @set, with iterator set pos after
* else NULL if @sub is a subset of @set
*/
struct aa_profile *__aa_label_next_not_in_set(struct label_it *I,
struct aa_label *set,
struct aa_label *sub)
{
AA_BUG(!set);
AA_BUG(!I);
AA_BUG(I->i < 0);
AA_BUG(I->i > set->size);
AA_BUG(!sub);
AA_BUG(I->j < 0);
AA_BUG(I->j > sub->size);
while (I->j < sub->size && I->i < set->size) {
int res = profile_cmp(sub->vec[I->j], set->vec[I->i]);
if (res == 0) {
(I->j)++;
(I->i)++;
} else if (res > 0)
(I->i)++;
else
return sub->vec[(I->j)++];
}
if (I->j < sub->size)
return sub->vec[(I->j)++];
return NULL;
}
/**
* aa_label_is_subset - test if @sub is a subset of @set
* @set: label to test against
 * @sub: label to test if it is a subset of @set
*
* Returns: true if @sub is subset of @set
* else false
*/
bool aa_label_is_subset(struct aa_label *set, struct aa_label *sub)
{
struct label_it i = { };
AA_BUG(!set);
AA_BUG(!sub);
if (sub == set)
return true;
return __aa_label_next_not_in_set(&i, set, sub) == NULL;
}
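/*
 * Illustrative sketch added by the editor, not part of the original
 * source: how the subset test reads for a stacked label. With a label
 * for profile A and the compound label A//&B, A is a subset of the
 * stack but not the other way around. example_subset() is hypothetical.
 */
#if 0	/* example only, never compiled */
static void example_subset(struct aa_label *a, struct aa_label *stack_ab)
{
	/* every profile of "A" is also in "A//&B" */
	WARN_ON(!aa_label_is_subset(stack_ab, a));
	/* "A//&B" has a profile that plain "A" does not */
	WARN_ON(aa_label_is_subset(a, stack_ab));
}
#endif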
/**
* aa_label_is_unconfined_subset - test if @sub is a subset of @set
* @set: label to test against
 * @sub: label to test if it is a subset of @set
*
 * This checks for subset while taking unconfined into account. If
* @sub contains an unconfined profile that does not have a matching
* unconfined in @set then this will not cause the test to fail.
* Conversely we don't care about an unconfined in @set that is not in
* @sub
*
* Returns: true if @sub is special_subset of @set
* else false
*/
bool aa_label_is_unconfined_subset(struct aa_label *set, struct aa_label *sub)
{
struct label_it i = { };
struct aa_profile *p;
AA_BUG(!set);
AA_BUG(!sub);
if (sub == set)
return true;
do {
p = __aa_label_next_not_in_set(&i, set, sub);
if (p && !profile_unconfined(p))
break;
} while (p);
return p == NULL;
}
/**
* __label_remove - remove @label from the label set
 * @label: label to remove
* @new: label to redirect to
*
* Requires: labels_set(@label)->lock write_lock
* Returns: true if the label was in the tree and removed
*/
static bool __label_remove(struct aa_label *label, struct aa_label *new)
{
struct aa_labelset *ls = labels_set(label);
AA_BUG(!ls);
AA_BUG(!label);
lockdep_assert_held_write(&ls->lock);
if (new)
__aa_proxy_redirect(label, new);
if (!label_is_stale(label))
__label_make_stale(label);
if (label->flags & FLAG_IN_TREE) {
rb_erase(&label->node, &ls->root);
label->flags &= ~FLAG_IN_TREE;
return true;
}
return false;
}
/**
* __label_replace - replace @old with @new in label set
* @old: label to remove from label set
* @new: label to replace @old with
*
* Requires: labels_set(@old)->lock write_lock
* valid ref count be held on @new
* Returns: true if @old was in set and replaced by @new
*
 * Note: current implementation requires the label set be ordered in such a way
* that @new directly replaces @old position in the set (ie.
* using pointer comparison of the label address would not work)
*/
static bool __label_replace(struct aa_label *old, struct aa_label *new)
{
struct aa_labelset *ls = labels_set(old);
AA_BUG(!ls);
AA_BUG(!old);
AA_BUG(!new);
lockdep_assert_held_write(&ls->lock);
AA_BUG(new->flags & FLAG_IN_TREE);
if (!label_is_stale(old))
__label_make_stale(old);
if (old->flags & FLAG_IN_TREE) {
rb_replace_node(&old->node, &new->node, &ls->root);
old->flags &= ~FLAG_IN_TREE;
new->flags |= FLAG_IN_TREE;
return true;
}
return false;
}
/**
 * __label_insert - attempt to insert @label into a label set
 * @ls: set of labels to insert @label into (NOT NULL)
 * @label: new label to insert (NOT NULL)
 * @replace: whether insertion should replace existing entry that is not stale
 *
 * Requires: @ls->lock
 * caller to hold a valid ref on @label
 * if @replace is true @label has a preallocated proxy associated
 * Returns: @label if successful in inserting @label - with additional refcount
* else ref counted equivalent label that is already in the set,
* the else condition only happens if @replace is false
*/
static struct aa_label *__label_insert(struct aa_labelset *ls,
struct aa_label *label, bool replace)
{
struct rb_node **new, *parent = NULL;
AA_BUG(!ls);
AA_BUG(!label);
AA_BUG(labels_set(label) != ls);
lockdep_assert_held_write(&ls->lock);
AA_BUG(label->flags & FLAG_IN_TREE);
/* Figure out where to put new node */
new = &ls->root.rb_node;
while (*new) {
struct aa_label *this = rb_entry(*new, struct aa_label, node);
int result = label_cmp(label, this);
parent = *new;
if (result == 0) {
/* !__aa_get_label means queued for destruction,
* so replace in place, however the label has
* died before the replacement so do not share
* the proxy
*/
if (!replace && !label_is_stale(this)) {
if (__aa_get_label(this))
return this;
} else
__proxy_share(this, label);
AA_BUG(!__label_replace(this, label));
return aa_get_label(label);
} else if (result < 0)
new = &((*new)->rb_left);
else /* (result > 0) */
new = &((*new)->rb_right);
}
/* Add new node and rebalance tree. */
rb_link_node(&label->node, parent, new);
rb_insert_color(&label->node, &ls->root);
label->flags |= FLAG_IN_TREE;
return aa_get_label(label);
}
/**
* __vec_find - find label that matches @vec in label set
* @vec: vec of profiles to find matching label for (NOT NULL)
* @n: length of @vec
*
 * Requires: vec_labelset(@vec) lock held
 * caller to hold valid refs on the profiles in @vec
 *
 * Returns: ref counted label that is equiv to @vec in tree
 * else NULL if @vec equiv is not in tree
*/
static struct aa_label *__vec_find(struct aa_profile **vec, int n)
{
struct rb_node *node;
AA_BUG(!vec);
AA_BUG(!*vec);
AA_BUG(n <= 0);
node = vec_labelset(vec, n)->root.rb_node;
while (node) {
struct aa_label *this = rb_entry(node, struct aa_label, node);
int result = vec_cmp(this->vec, this->size, vec, n);
if (result > 0)
node = node->rb_left;
else if (result < 0)
node = node->rb_right;
else
return __aa_get_label(this);
}
return NULL;
}
/**
* __label_find - find label @label in label set
* @label: label to find (NOT NULL)
*
* Requires: labels_set(@label)->lock held
* caller to hold a valid ref on l
*
* Returns: ref counted @label if @label is in tree OR
* ref counted label that is equiv to @label in tree
* else NULL if @label or equiv is not in tree
*/
static struct aa_label *__label_find(struct aa_label *label)
{
AA_BUG(!label);
return __vec_find(label->vec, label->size);
}
/**
* aa_label_remove - remove a label from the labelset
* @label: label to remove
*
* Returns: true if @label was removed from the tree
* else @label was not in tree so it could not be removed
*/
bool aa_label_remove(struct aa_label *label)
{
struct aa_labelset *ls = labels_set(label);
unsigned long flags;
bool res;
AA_BUG(!ls);
write_lock_irqsave(&ls->lock, flags);
res = __label_remove(label, ns_unconfined(labels_ns(label)));
write_unlock_irqrestore(&ls->lock, flags);
return res;
}
/**
* aa_label_replace - replace a label @old with a new version @new
* @old: label to replace
* @new: label replacing @old
*
* Returns: true if @old was in tree and replaced
* else @old was not in tree, and @new was not inserted
*/
bool aa_label_replace(struct aa_label *old, struct aa_label *new)
{
unsigned long flags;
bool res;
if (name_is_shared(old, new) && labels_ns(old) == labels_ns(new)) {
write_lock_irqsave(&labels_set(old)->lock, flags);
if (old->proxy != new->proxy)
__proxy_share(old, new);
else
__aa_proxy_redirect(old, new);
res = __label_replace(old, new);
write_unlock_irqrestore(&labels_set(old)->lock, flags);
} else {
struct aa_label *l;
struct aa_labelset *ls = labels_set(old);
write_lock_irqsave(&ls->lock, flags);
res = __label_remove(old, new);
if (labels_ns(old) != labels_ns(new)) {
write_unlock_irqrestore(&ls->lock, flags);
ls = labels_set(new);
write_lock_irqsave(&ls->lock, flags);
}
l = __label_insert(ls, new, true);
res = (l == new);
write_unlock_irqrestore(&ls->lock, flags);
aa_put_label(l);
}
return res;
}
/**
* vec_find - find label @l in label set
* @vec: array of profiles to find equiv label for (NOT NULL)
* @n: length of @vec
*
* Returns: refcounted label if @vec equiv is in tree
* else NULL if @vec equiv is not in tree
*/
static struct aa_label *vec_find(struct aa_profile **vec, int n)
{
struct aa_labelset *ls;
struct aa_label *label;
unsigned long flags;
AA_BUG(!vec);
AA_BUG(!*vec);
AA_BUG(n <= 0);
ls = vec_labelset(vec, n);
read_lock_irqsave(&ls->lock, flags);
label = __vec_find(vec, n);
read_unlock_irqrestore(&ls->lock, flags);
return label;
}
/* requires sort and merge done first */
static struct aa_label *vec_create_and_insert_label(struct aa_profile **vec,
int len, gfp_t gfp)
{
struct aa_label *label = NULL;
struct aa_labelset *ls;
unsigned long flags;
struct aa_label *new;
int i;
AA_BUG(!vec);
if (len == 1)
return aa_get_label(&vec[0]->label);
ls = labels_set(&vec[len - 1]->label);
/* TODO: enable when read side is lockless
* check if label exists before taking locks
*/
new = aa_label_alloc(len, NULL, gfp);
if (!new)
return NULL;
for (i = 0; i < len; i++)
new->vec[i] = aa_get_profile(vec[i]);
write_lock_irqsave(&ls->lock, flags);
label = __label_insert(ls, new, false);
write_unlock_irqrestore(&ls->lock, flags);
label_free_or_put_new(label, new);
return label;
}
struct aa_label *aa_vec_find_or_create_label(struct aa_profile **vec, int len,
gfp_t gfp)
{
struct aa_label *label = vec_find(vec, len);
if (label)
return label;
return vec_create_and_insert_label(vec, len, gfp);
}
/**
* aa_label_find - find label @label in label set
* @label: label to find (NOT NULL)
*
* Requires: caller to hold a valid ref on l
*
* Returns: refcounted @label if @label is in tree
* refcounted label that is equiv to @label in tree
* else NULL if @label or equiv is not in tree
*/
struct aa_label *aa_label_find(struct aa_label *label)
{
AA_BUG(!label);
return vec_find(label->vec, label->size);
}
/**
* aa_label_insert - insert label @label into @ls or return existing label
 * @ls: labelset to insert @label into
 * @label: label to insert
*
* Requires: caller to hold a valid ref on @label
*
* Returns: ref counted @label if successful in inserting @label
* else ref counted equivalent label that is already in the set
*/
struct aa_label *aa_label_insert(struct aa_labelset *ls, struct aa_label *label)
{
struct aa_label *l;
unsigned long flags;
AA_BUG(!ls);
AA_BUG(!label);
/* check if label exists before taking lock */
if (!label_is_stale(label)) {
read_lock_irqsave(&ls->lock, flags);
l = __label_find(label);
read_unlock_irqrestore(&ls->lock, flags);
if (l)
return l;
}
write_lock_irqsave(&ls->lock, flags);
l = __label_insert(ls, label, false);
write_unlock_irqrestore(&ls->lock, flags);
return l;
}
/**
* aa_label_next_in_merge - find the next profile when merging @a and @b
* @I: label iterator
* @a: label to merge
* @b: label to merge
*
* Returns: next profile
* else null if no more profiles
*/
struct aa_profile *aa_label_next_in_merge(struct label_it *I,
struct aa_label *a,
struct aa_label *b)
{
AA_BUG(!a);
AA_BUG(!b);
AA_BUG(!I);
AA_BUG(I->i < 0);
AA_BUG(I->i > a->size);
AA_BUG(I->j < 0);
AA_BUG(I->j > b->size);
if (I->i < a->size) {
if (I->j < b->size) {
int res = profile_cmp(a->vec[I->i], b->vec[I->j]);
if (res > 0)
return b->vec[(I->j)++];
if (res == 0)
(I->j)++;
}
return a->vec[(I->i)++];
}
if (I->j < b->size)
return b->vec[(I->j)++];
return NULL;
}
/**
* label_merge_cmp - cmp of @a merging with @b against @z for set ordering
* @a: label to merge then compare (NOT NULL)
* @b: label to merge then compare (NOT NULL)
* @z: label to compare merge against (NOT NULL)
*
* Assumes: using the most recent versions of @a, @b, and @z
*
* Returns: <0 if a < b
* ==0 if a == b
* >0 if a > b
*/
static int label_merge_cmp(struct aa_label *a, struct aa_label *b,
struct aa_label *z)
{
struct aa_profile *p = NULL;
struct label_it i = { };
int k;
AA_BUG(!a);
AA_BUG(!b);
AA_BUG(!z);
for (k = 0;
k < z->size && (p = aa_label_next_in_merge(&i, a, b));
k++) {
int res = profile_cmp(p, z->vec[k]);
if (res != 0)
return res;
}
if (p)
return 1;
else if (k < z->size)
return -1;
return 0;
}
/**
* label_merge_insert - create a new label by merging @a and @b
* @new: preallocated label to merge into (NOT NULL)
* @a: label to merge with @b (NOT NULL)
* @b: label to merge with @a (NOT NULL)
*
* Requires: preallocated proxy
*
* Returns: ref counted label either @new if merge is unique
* @a if @b is a subset of @a
* @b if @a is a subset of @b
*
* NOTE: will not use @new if the merge results in @new == @a or @b
*
* Must be used within labelset write lock to avoid racing with
* setting labels stale.
*/
static struct aa_label *label_merge_insert(struct aa_label *new,
struct aa_label *a,
struct aa_label *b)
{
struct aa_label *label;
struct aa_labelset *ls;
struct aa_profile *next;
struct label_it i;
unsigned long flags;
int k = 0, invcount = 0;
bool stale = false;
AA_BUG(!a);
AA_BUG(a->size < 0);
AA_BUG(!b);
AA_BUG(b->size < 0);
AA_BUG(!new);
AA_BUG(new->size < a->size + b->size);
label_for_each_in_merge(i, a, b, next) {
AA_BUG(!next);
if (profile_is_stale(next)) {
new->vec[k] = aa_get_newest_profile(next);
AA_BUG(!new->vec[k]->label.proxy);
AA_BUG(!new->vec[k]->label.proxy->label);
if (next->label.proxy != new->vec[k]->label.proxy)
invcount++;
k++;
stale = true;
} else
new->vec[k++] = aa_get_profile(next);
}
/* set to actual size which is <= allocated len */
new->size = k;
new->vec[k] = NULL;
if (invcount) {
new->size -= aa_vec_unique(&new->vec[0], new->size,
VEC_FLAG_TERMINATE);
/* TODO: deal with reference labels */
if (new->size == 1) {
label = aa_get_label(&new->vec[0]->label);
return label;
}
} else if (!stale) {
/*
* merge could be same as a || b, note: it is not possible
* for new->size == a->size == b->size unless a == b
*/
if (k == a->size)
return aa_get_label(a);
else if (k == b->size)
return aa_get_label(b);
}
new->flags |= accum_vec_flags(new->vec, new->size);
ls = labels_set(new);
write_lock_irqsave(&ls->lock, flags);
label = __label_insert(labels_set(new), new, false);
write_unlock_irqrestore(&ls->lock, flags);
return label;
}
/**
* labelset_of_merge - find which labelset a merged label should be inserted
* @a: label to merge and insert
* @b: label to merge and insert
*
* Returns: labelset that the merged label should be inserted into
*/
static struct aa_labelset *labelset_of_merge(struct aa_label *a,
struct aa_label *b)
{
struct aa_ns *nsa = labels_ns(a);
struct aa_ns *nsb = labels_ns(b);
if (ns_cmp(nsa, nsb) <= 0)
return &nsa->labels;
return &nsb->labels;
}
/**
* __label_find_merge - find label that is equiv to merge of @a and @b
* @ls: set of labels to search (NOT NULL)
* @a: label to merge with @b (NOT NULL)
* @b: label to merge with @a (NOT NULL)
*
* Requires: ls->lock read_lock held
*
* Returns: ref counted label that is equiv to merge of @a and @b
* else NULL if merge of @a and @b is not in set
*/
static struct aa_label *__label_find_merge(struct aa_labelset *ls,
struct aa_label *a,
struct aa_label *b)
{
struct rb_node *node;
AA_BUG(!ls);
AA_BUG(!a);
AA_BUG(!b);
if (a == b)
return __label_find(a);
node = ls->root.rb_node;
while (node) {
struct aa_label *this = container_of(node, struct aa_label,
node);
int result = label_merge_cmp(a, b, this);
if (result < 0)
node = node->rb_left;
else if (result > 0)
node = node->rb_right;
else
return __aa_get_label(this);
}
return NULL;
}
/**
* aa_label_find_merge - find label that is equiv to merge of @a and @b
* @a: label to merge with @b (NOT NULL)
* @b: label to merge with @a (NOT NULL)
*
* Requires: labels be fully constructed with a valid ns
*
* Returns: ref counted label that is equiv to merge of @a and @b
* else NULL if merge of @a and @b is not in set
*/
struct aa_label *aa_label_find_merge(struct aa_label *a, struct aa_label *b)
{
struct aa_labelset *ls;
struct aa_label *label, *ar = NULL, *br = NULL;
unsigned long flags;
AA_BUG(!a);
AA_BUG(!b);
if (label_is_stale(a))
a = ar = aa_get_newest_label(a);
if (label_is_stale(b))
b = br = aa_get_newest_label(b);
ls = labelset_of_merge(a, b);
read_lock_irqsave(&ls->lock, flags);
label = __label_find_merge(ls, a, b);
read_unlock_irqrestore(&ls->lock, flags);
aa_put_label(ar);
aa_put_label(br);
return label;
}
/**
* aa_label_merge - attempt to insert new merged label of @a and @b
* @ls: set of labels to insert label into (NOT NULL)
* @a: label to merge with @b (NOT NULL)
* @b: label to merge with @a (NOT NULL)
* @gfp: memory allocation type
*
* Requires: caller to hold valid refs on @a and @b
* labels be fully constructed with a valid ns
*
* Returns: ref counted new label if successful in inserting merge of a & b
* else ref counted equivalent label that is already in the set.
* else NULL if could not create label (-ENOMEM)
*/
struct aa_label *aa_label_merge(struct aa_label *a, struct aa_label *b,
gfp_t gfp)
{
struct aa_label *label = NULL;
AA_BUG(!a);
AA_BUG(!b);
if (a == b)
return aa_get_newest_label(a);
/* TODO: enable when read side is lockless
* check if label exists before taking locks
if (!label_is_stale(a) && !label_is_stale(b))
label = aa_label_find_merge(a, b);
*/
if (!label) {
struct aa_label *new;
a = aa_get_newest_label(a);
b = aa_get_newest_label(b);
/* could use label_merge_len(a, b), but requires double
* comparison for small savings
*/
new = aa_label_alloc(a->size + b->size, NULL, gfp);
if (!new)
goto out;
label = label_merge_insert(new, a, b);
label_free_or_put_new(label, new);
out:
aa_put_label(a);
aa_put_label(b);
}
return label;
}
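/*
 * Illustrative sketch added by the editor, not part of the original
 * source: stacking two labels with aa_label_merge(). The result is a
 * counted reference (it may be @a or @b itself when one is a subset of
 * the other) or NULL on allocation failure, and the caller must put
 * it. example_stack_labels() is hypothetical.
 */
#if 0	/* example only, never compiled */
static int example_stack_labels(struct aa_label *a, struct aa_label *b)
{
	struct aa_label *merged = aa_label_merge(a, b, GFP_KERNEL);
	if (!merged)
		return -ENOMEM;
	/* ... label the shared object with @merged ... */
	aa_put_label(merged);
	return 0;
}
#endif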
/* match a profile and its associated ns component if needed
* Assumes visibility test has already been done.
 * If a subns profile is not to be matched, it should be prescreened with
 * the visibility test.
*/
static inline aa_state_t match_component(struct aa_profile *profile,
struct aa_ruleset *rules,
struct aa_profile *tp,
aa_state_t state)
{
const char *ns_name;
if (profile->ns == tp->ns)
return aa_dfa_match(rules->policy.dfa, state, tp->base.hname);
/* try matching with namespace name and then profile */
ns_name = aa_ns_name(profile->ns, tp->ns, true);
state = aa_dfa_match_len(rules->policy.dfa, state, ":", 1);
state = aa_dfa_match(rules->policy.dfa, state, ns_name);
state = aa_dfa_match_len(rules->policy.dfa, state, ":", 1);
return aa_dfa_match(rules->policy.dfa, state, tp->base.hname);
}
/**
* label_compound_match - find perms for full compound label
 * @profile: profile to find perms for
 * @rules: ruleset to search
 * @label: label to check access permissions for
 * @state: state to start the match in
* @subns: whether to do permission checks on components in a subns
* @request: permissions to request
* @perms: perms struct to set
*
* Returns: 0 on success else ERROR
*
* For the label A//&B//&C this does the perm match for A//&B//&C
* @perms should be preinitialized with allperms OR a previous permission
* check to be stacked.
*/
static int label_compound_match(struct aa_profile *profile,
struct aa_ruleset *rules,
struct aa_label *label,
aa_state_t state, bool subns, u32 request,
struct aa_perms *perms)
{
struct aa_profile *tp;
struct label_it i;
/* find first subcomponent that is visible */
label_for_each(i, label, tp) {
if (!aa_ns_visible(profile->ns, tp->ns, subns))
continue;
state = match_component(profile, rules, tp, state);
if (!state)
goto fail;
goto next;
}
/* no component visible */
*perms = allperms;
return 0;
next:
label_for_each_cont(i, label, tp) {
if (!aa_ns_visible(profile->ns, tp->ns, subns))
continue;
state = aa_dfa_match(rules->policy.dfa, state, "//&");
state = match_component(profile, rules, tp, state);
if (!state)
goto fail;
}
*perms = *aa_lookup_perms(&rules->policy, state);
aa_apply_modes_to_perms(profile, perms);
if ((perms->allow & request) != request)
return -EACCES;
return 0;
fail:
*perms = nullperms;
return state;
}
/**
* label_components_match - find perms for all subcomponents of a label
* @profile: profile to find perms for
* @rules: ruleset to search
* @label: label to check access permissions for
* @start: state to start match in
* @subns: whether to do permission checks on components in a subns
* @request: permissions to request
* @perms: an initialized perms struct to add accumulation to
*
* Returns: 0 on success else ERROR
*
* For the label A//&B//&C this does the perm match for each of A and B and C
* @perms should be preinitialized with allperms OR a previous permission
* check to be stacked.
*/
static int label_components_match(struct aa_profile *profile,
struct aa_ruleset *rules,
struct aa_label *label, aa_state_t start,
bool subns, u32 request,
struct aa_perms *perms)
{
struct aa_profile *tp;
struct label_it i;
struct aa_perms tmp;
aa_state_t state = 0;
/* find first subcomponent to test */
label_for_each(i, label, tp) {
if (!aa_ns_visible(profile->ns, tp->ns, subns))
continue;
state = match_component(profile, rules, tp, start);
if (!state)
goto fail;
goto next;
}
/* no subcomponents visible - no change in perms */
return 0;
next:
tmp = *aa_lookup_perms(&rules->policy, state);
aa_apply_modes_to_perms(profile, &tmp);
aa_perms_accum(perms, &tmp);
label_for_each_cont(i, label, tp) {
if (!aa_ns_visible(profile->ns, tp->ns, subns))
continue;
state = match_component(profile, rules, tp, start);
if (!state)
goto fail;
tmp = *aa_lookup_perms(&rules->policy, state);
aa_apply_modes_to_perms(profile, &tmp);
aa_perms_accum(perms, &tmp);
}
if ((perms->allow & request) != request)
return -EACCES;
return 0;
fail:
*perms = nullperms;
return -EACCES;
}
/**
* aa_label_match - do a multi-component label match
* @profile: profile to match against (NOT NULL)
* @rules: ruleset to search
* @label: label to match (NOT NULL)
* @state: state to start in
* @subns: whether to match subns components
* @request: permission request
* @perms: Returns computed perms (NOT NULL)
*
* Returns: the state the match finished in, may be the none matching state
*/
int aa_label_match(struct aa_profile *profile, struct aa_ruleset *rules,
struct aa_label *label, aa_state_t state, bool subns,
u32 request, struct aa_perms *perms)
{
int error = label_compound_match(profile, rules, label, state, subns,
request, perms);
if (!error)
return error;
*perms = allperms;
return label_components_match(profile, rules, label, state, subns,
request, perms);
}
/**
* aa_update_label_name - update a label to have a stored name
* @ns: ns being viewed from (NOT NULL)
* @label: label to update (NOT NULL)
* @gfp: type of memory allocation
*
* Requires: labels_set(label) not locked in caller
*
* note: only updates the label name if it does not have a name already
* and if it is in the labelset
*/
bool aa_update_label_name(struct aa_ns *ns, struct aa_label *label, gfp_t gfp)
{
struct aa_labelset *ls;
unsigned long flags;
char __counted *name;
bool res = false;
AA_BUG(!ns);
AA_BUG(!label);
if (label->hname || labels_ns(label) != ns)
return res;
if (aa_label_acntsxprint(&name, ns, label, FLAGS_NONE, gfp) < 0)
return res;
ls = labels_set(label);
write_lock_irqsave(&ls->lock, flags);
if (!label->hname && label->flags & FLAG_IN_TREE) {
label->hname = name;
res = true;
} else
aa_put_str(name);
write_unlock_irqrestore(&ls->lock, flags);
return res;
}
/*
* cached label name is present and visible
 * @label->hname only exists if label is namespace hierarchical
*/
static inline bool use_label_hname(struct aa_ns *ns, struct aa_label *label,
int flags)
{
if (label->hname && (!ns || labels_ns(label) == ns) &&
!(flags & ~FLAG_SHOW_MODE))
return true;
return false;
}
/* helper macro for snprint routines */
#define update_for_len(total, len, size, str) \
do { \
size_t ulen = len; \
\
AA_BUG(len < 0); \
total += ulen; \
ulen = min(ulen, size); \
size -= ulen; \
str += ulen; \
} while (0)
/**
* aa_profile_snxprint - print a profile name to a buffer
* @str: buffer to write to. (MAY BE NULL if @size == 0)
* @size: size of buffer
* @view: namespace profile is being viewed from
* @profile: profile to view (NOT NULL)
* @flags: whether to include the mode string
* @prev_ns: last ns printed when used in compound print
*
* Returns: size of name written or would be written if larger than
* available buffer
*
* Note: will not print anything if the profile is not visible
*/
static int aa_profile_snxprint(char *str, size_t size, struct aa_ns *view,
struct aa_profile *profile, int flags,
struct aa_ns **prev_ns)
{
const char *ns_name = NULL;
AA_BUG(!str && size != 0);
AA_BUG(!profile);
if (!view)
view = profiles_ns(profile);
if (view != profile->ns &&
(!prev_ns || (*prev_ns != profile->ns))) {
if (prev_ns)
*prev_ns = profile->ns;
ns_name = aa_ns_name(view, profile->ns,
flags & FLAG_VIEW_SUBNS);
if (ns_name == aa_hidden_ns_name) {
if (flags & FLAG_HIDDEN_UNCONFINED)
return snprintf(str, size, "%s", "unconfined");
return snprintf(str, size, "%s", ns_name);
}
}
if ((flags & FLAG_SHOW_MODE) && profile != profile->ns->unconfined) {
const char *modestr = aa_profile_mode_names[profile->mode];
if (ns_name)
return snprintf(str, size, ":%s:%s (%s)", ns_name,
profile->base.hname, modestr);
return snprintf(str, size, "%s (%s)", profile->base.hname,
modestr);
}
if (ns_name)
return snprintf(str, size, ":%s:%s", ns_name,
profile->base.hname);
return snprintf(str, size, "%s", profile->base.hname);
}
static const char *label_modename(struct aa_ns *ns, struct aa_label *label,
int flags)
{
struct aa_profile *profile;
struct label_it i;
int mode = -1, count = 0;
label_for_each(i, label, profile) {
if (aa_ns_visible(ns, profile->ns, flags & FLAG_VIEW_SUBNS)) {
count++;
if (profile == profile->ns->unconfined)
/* special case unconfined so stacks with
* unconfined don't report as mixed. ie.
* profile_foo//&:ns1:unconfined (mixed)
*/
continue;
if (mode == -1)
mode = profile->mode;
else if (mode != profile->mode)
return "mixed";
}
}
if (count == 0)
return "-";
if (mode == -1)
/* everything was unconfined */
mode = APPARMOR_UNCONFINED;
return aa_profile_mode_names[mode];
}
/* if any visible label is not unconfined the display_mode returns true */
static inline bool display_mode(struct aa_ns *ns, struct aa_label *label,
int flags)
{
if ((flags & FLAG_SHOW_MODE)) {
struct aa_profile *profile;
struct label_it i;
label_for_each(i, label, profile) {
if (aa_ns_visible(ns, profile->ns,
flags & FLAG_VIEW_SUBNS) &&
profile != profile->ns->unconfined)
return true;
}
/* only ns->unconfined in set of profiles in ns */
return false;
}
return false;
}
/**
* aa_label_snxprint - print a label name to a string buffer
* @str: buffer to write to. (MAY BE NULL if @size == 0)
* @size: size of buffer
* @ns: namespace profile is being viewed from
* @label: label to view (NOT NULL)
* @flags: whether to include the mode string
*
* Returns: size of name written or would be written if larger than
* available buffer
*
* Note: labels do not have to be strictly hierarchical to the ns as
* objects may be shared across different namespaces and thus
 * pick up labeling from each ns. If a particular part of the
* label is not visible it will just be excluded. And if none
* of the label is visible "---" will be used.
*/
int aa_label_snxprint(char *str, size_t size, struct aa_ns *ns,
struct aa_label *label, int flags)
{
struct aa_profile *profile;
struct aa_ns *prev_ns = NULL;
struct label_it i;
int count = 0, total = 0;
ssize_t len;
AA_BUG(!str && size != 0);
AA_BUG(!label);
if (AA_DEBUG_LABEL && (flags & FLAG_ABS_ROOT)) {
ns = root_ns;
len = snprintf(str, size, "_");
update_for_len(total, len, size, str);
} else if (!ns) {
ns = labels_ns(label);
}
label_for_each(i, label, profile) {
if (aa_ns_visible(ns, profile->ns, flags & FLAG_VIEW_SUBNS)) {
if (count > 0) {
len = snprintf(str, size, "//&");
update_for_len(total, len, size, str);
}
len = aa_profile_snxprint(str, size, ns, profile,
flags & FLAG_VIEW_SUBNS,
&prev_ns);
update_for_len(total, len, size, str);
count++;
}
}
if (count == 0) {
if (flags & FLAG_HIDDEN_UNCONFINED)
return snprintf(str, size, "%s", "unconfined");
return snprintf(str, size, "%s", aa_hidden_ns_name);
}
/* count == 1 && ... is for backwards compat where the mode
* is not displayed for 'unconfined' in the current ns
*/
if (display_mode(ns, label, flags)) {
len = snprintf(str, size, " (%s)",
label_modename(ns, label, flags));
update_for_len(total, len, size, str);
}
return total;
}
#undef update_for_len
/**
* aa_label_asxprint - allocate a string buffer and print label into it
* @strp: Returns - the allocated buffer with the label name. (NOT NULL)
* @ns: namespace profile is being viewed from
* @label: label to view (NOT NULL)
* @flags: flags controlling what label info is printed
* @gfp: kernel memory allocation type
*
* Returns: size of name written or would be written if larger than
* available buffer
*/
int aa_label_asxprint(char **strp, struct aa_ns *ns, struct aa_label *label,
int flags, gfp_t gfp)
{
int size;
AA_BUG(!strp);
AA_BUG(!label);
size = aa_label_snxprint(NULL, 0, ns, label, flags);
if (size < 0)
return size;
*strp = kmalloc(size + 1, gfp);
if (!*strp)
return -ENOMEM;
return aa_label_snxprint(*strp, size + 1, ns, label, flags);
}
/**
* aa_label_acntsxprint - allocate a __counted string buffer and print label
 * @strp: Returns - the allocated __counted buffer with the label name. (NOT NULL)
* @ns: namespace profile is being viewed from
* @label: label to view (NOT NULL)
* @flags: flags controlling what label info is printed
* @gfp: kernel memory allocation type
*
* Returns: size of name written or would be written if larger than
* available buffer
*/
int aa_label_acntsxprint(char __counted **strp, struct aa_ns *ns,
struct aa_label *label, int flags, gfp_t gfp)
{
int size;
AA_BUG(!strp);
AA_BUG(!label);
size = aa_label_snxprint(NULL, 0, ns, label, flags);
if (size < 0)
return size;
*strp = aa_str_alloc(size + 1, gfp);
if (!*strp)
return -ENOMEM;
return aa_label_snxprint(*strp, size + 1, ns, label, flags);
}
void aa_label_xaudit(struct audit_buffer *ab, struct aa_ns *ns,
struct aa_label *label, int flags, gfp_t gfp)
{
const char *str;
char *name = NULL;
int len;
AA_BUG(!ab);
AA_BUG(!label);
if (!use_label_hname(ns, label, flags) ||
display_mode(ns, label, flags)) {
len = aa_label_asxprint(&name, ns, label, flags, gfp);
if (len < 0) {
AA_DEBUG("label print error");
return;
}
str = name;
} else {
str = (char *) label->hname;
len = strlen(str);
}
if (audit_string_contains_control(str, len))
audit_log_n_hex(ab, str, len);
else
audit_log_n_string(ab, str, len);
kfree(name);
}
void aa_label_seq_xprint(struct seq_file *f, struct aa_ns *ns,
struct aa_label *label, int flags, gfp_t gfp)
{
AA_BUG(!f);
AA_BUG(!label);
if (!use_label_hname(ns, label, flags)) {
char *str;
int len;
len = aa_label_asxprint(&str, ns, label, flags, gfp);
if (len < 0) {
AA_DEBUG("label print error");
return;
}
seq_puts(f, str);
kfree(str);
} else if (display_mode(ns, label, flags))
seq_printf(f, "%s (%s)", label->hname,
label_modename(ns, label, flags));
else
seq_puts(f, label->hname);
}
void aa_label_xprintk(struct aa_ns *ns, struct aa_label *label, int flags,
gfp_t gfp)
{
AA_BUG(!label);
if (!use_label_hname(ns, label, flags)) {
char *str;
int len;
len = aa_label_asxprint(&str, ns, label, flags, gfp);
if (len < 0) {
AA_DEBUG("label print error");
return;
}
pr_info("%s", str);
kfree(str);
} else if (display_mode(ns, label, flags))
pr_info("%s (%s)", label->hname,
label_modename(ns, label, flags));
else
pr_info("%s", label->hname);
}
void aa_label_audit(struct audit_buffer *ab, struct aa_label *label, gfp_t gfp)
{
struct aa_ns *ns = aa_get_current_ns();
aa_label_xaudit(ab, ns, label, FLAG_VIEW_SUBNS, gfp);
aa_put_ns(ns);
}
void aa_label_seq_print(struct seq_file *f, struct aa_label *label, gfp_t gfp)
{
struct aa_ns *ns = aa_get_current_ns();
aa_label_seq_xprint(f, ns, label, FLAG_VIEW_SUBNS, gfp);
aa_put_ns(ns);
}
void aa_label_printk(struct aa_label *label, gfp_t gfp)
{
struct aa_ns *ns = aa_get_current_ns();
aa_label_xprintk(ns, label, FLAG_VIEW_SUBNS, gfp);
aa_put_ns(ns);
}
static int label_count_strn_entries(const char *str, size_t n)
{
const char *end = str + n;
const char *split;
int count = 1;
AA_BUG(!str);
for (split = aa_label_strn_split(str, end - str);
split;
split = aa_label_strn_split(str, end - str)) {
count++;
str = split + 3;
}
return count;
}
/*
* ensure stacks with components like
* :ns:A//&B
* have :ns: applied to both 'A' and 'B' by making the lookup relative
* to the base if the lookup specifies an ns, else making the stacked lookup
* relative to the last embedded ns in the string.
*/
static struct aa_profile *fqlookupn_profile(struct aa_label *base,
struct aa_label *currentbase,
const char *str, size_t n)
{
const char *first = skipn_spaces(str, n);
if (first && *first == ':')
return aa_fqlookupn_profile(base, str, n);
return aa_fqlookupn_profile(currentbase, str, n);
}
/**
* aa_label_strn_parse - parse, validate and convert a text string to a label
* @base: base label to use for lookups (NOT NULL)
* @str: null terminated text string (NOT NULL)
* @n: length of str to parse, will stop at \0 if encountered before n
* @gfp: allocation type
* @create: true if should create compound labels if they don't exist
* @force_stack: true if should stack even if no leading &
*
* Returns: the matching refcounted label if present
* else ERRPTR
*/
struct aa_label *aa_label_strn_parse(struct aa_label *base, const char *str,
size_t n, gfp_t gfp, bool create,
bool force_stack)
{
DEFINE_VEC(profile, vec);
struct aa_label *label, *currbase = base;
int i, len, stack = 0, error;
const char *end = str + n;
const char *split;
AA_BUG(!base);
AA_BUG(!str);
str = skipn_spaces(str, n);
if (str == NULL || (AA_DEBUG_LABEL && *str == '_' &&
base != &root_ns->unconfined->label))
return ERR_PTR(-EINVAL);
len = label_count_strn_entries(str, end - str);
if (*str == '&' || force_stack) {
/* stack on top of base */
stack = base->size;
len += stack;
if (*str == '&')
str++;
}
error = vec_setup(profile, vec, len, gfp);
if (error)
return ERR_PTR(error);
for (i = 0; i < stack; i++)
vec[i] = aa_get_profile(base->vec[i]);
for (split = aa_label_strn_split(str, end - str), i = stack;
split && i < len; i++) {
vec[i] = fqlookupn_profile(base, currbase, str, split - str);
if (!vec[i])
goto fail;
/*
* if component specified a new ns it becomes the new base
* so that subsequent lookups are relative to it
*/
if (vec[i]->ns != labels_ns(currbase))
currbase = &vec[i]->label;
str = split + 3;
split = aa_label_strn_split(str, end - str);
}
/* last element doesn't have a split */
if (i < len) {
vec[i] = fqlookupn_profile(base, currbase, str, end - str);
if (!vec[i])
goto fail;
}
if (len == 1)
/* no need to free vec as len < LOCAL_VEC_ENTRIES */
return &vec[0]->label;
len -= aa_vec_unique(vec, len, VEC_FLAG_TERMINATE);
/* TODO: deal with reference labels */
if (len == 1) {
label = aa_get_label(&vec[0]->label);
goto out;
}
if (create)
label = aa_vec_find_or_create_label(vec, len, gfp);
else
label = vec_find(vec, len);
if (!label)
goto fail;
out:
/* use adjusted len from after vec_unique, not original */
vec_cleanup(profile, vec, len);
return label;
fail:
label = ERR_PTR(-ENOENT);
goto out;
}
struct aa_label *aa_label_parse(struct aa_label *base, const char *str,
gfp_t gfp, bool create, bool force_stack)
{
return aa_label_strn_parse(base, str, strlen(str), gfp, create,
force_stack);
}
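/*
 * Illustrative sketch added by the editor, not part of the original
 * source: parsing the text form of a (possibly stacked) label relative
 * to the root namespace's unconfined label. aa_label_parse() returns a
 * counted reference or an ERR_PTR; with @create true a missing
 * compound label is created. example_parse_label() is hypothetical.
 */
#if 0	/* example only, never compiled */
static struct aa_label *example_parse_label(const char *name)
{
	struct aa_label *base = &root_ns->unconfined->label;
	struct aa_label *label;
	label = aa_label_parse(base, name, GFP_KERNEL, true, false);
	if (IS_ERR(label))
		return label;
	/* caller owns the reference and must aa_put_label() it later */
	return label;
}
#endif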
/**
* aa_labelset_destroy - remove all labels from the label set
* @ls: label set to cleanup (NOT NULL)
*
* Labels that are removed from the set may still exist beyond the set
* being destroyed depending on their reference counting
*/
void aa_labelset_destroy(struct aa_labelset *ls)
{
struct rb_node *node;
unsigned long flags;
AA_BUG(!ls);
write_lock_irqsave(&ls->lock, flags);
for (node = rb_first(&ls->root); node; node = rb_first(&ls->root)) {
struct aa_label *this = rb_entry(node, struct aa_label, node);
if (labels_ns(this) != root_ns)
__label_remove(this,
ns_unconfined(labels_ns(this)->parent));
else
__label_remove(this, NULL);
}
write_unlock_irqrestore(&ls->lock, flags);
}
/*
* @ls: labelset to init (NOT NULL)
*/
void aa_labelset_init(struct aa_labelset *ls)
{
AA_BUG(!ls);
rwlock_init(&ls->lock);
ls->root = RB_ROOT;
}
static struct aa_label *labelset_next_stale(struct aa_labelset *ls)
{
struct aa_label *label;
struct rb_node *node;
unsigned long flags;
AA_BUG(!ls);
read_lock_irqsave(&ls->lock, flags);
__labelset_for_each(ls, node) {
label = rb_entry(node, struct aa_label, node);
if ((label_is_stale(label) ||
vec_is_stale(label->vec, label->size)) &&
__aa_get_label(label))
goto out;
}
label = NULL;
out:
read_unlock_irqrestore(&ls->lock, flags);
return label;
}
/**
* __label_update - insert updated version of @label into labelset
 * @label: the label to update/replace
*
* Returns: new label that is up to date
* else NULL on failure
*
* Requires: @ns lock be held
*
* Note: worst case is the stale @label does not get updated and has
* to be updated at a later time.
*/
static struct aa_label *__label_update(struct aa_label *label)
{
struct aa_label *new, *tmp;
struct aa_labelset *ls;
unsigned long flags;
int i, invcount = 0;
AA_BUG(!label);
AA_BUG(!mutex_is_locked(&labels_ns(label)->lock));
new = aa_label_alloc(label->size, label->proxy, GFP_KERNEL);
if (!new)
return NULL;
/*
* while holding the ns_lock will stop profile replacement, removal,
* and label updates, label merging and removal can be occurring
*/
ls = labels_set(label);
write_lock_irqsave(&ls->lock, flags);
for (i = 0; i < label->size; i++) {
AA_BUG(!label->vec[i]);
new->vec[i] = aa_get_newest_profile(label->vec[i]);
AA_BUG(!new->vec[i]);
AA_BUG(!new->vec[i]->label.proxy);
AA_BUG(!new->vec[i]->label.proxy->label);
if (new->vec[i]->label.proxy != label->vec[i]->label.proxy)
invcount++;
}
/* updated stale label by being removed/renamed from labelset */
if (invcount) {
new->size -= aa_vec_unique(&new->vec[0], new->size,
VEC_FLAG_TERMINATE);
/* TODO: deal with reference labels */
if (new->size == 1) {
tmp = aa_get_label(&new->vec[0]->label);
AA_BUG(tmp == label);
goto remove;
}
if (labels_set(label) != labels_set(new)) {
write_unlock_irqrestore(&ls->lock, flags);
tmp = aa_label_insert(labels_set(new), new);
write_lock_irqsave(&ls->lock, flags);
goto remove;
}
} else
AA_BUG(labels_ns(label) != labels_ns(new));
tmp = __label_insert(labels_set(label), new, true);
remove:
/* ensure label is removed, and redirected correctly */
__label_remove(label, tmp);
write_unlock_irqrestore(&ls->lock, flags);
label_free_or_put_new(tmp, new);
return tmp;
}
/**
* __labelset_update - update labels in @ns
* @ns: namespace to update labels in (NOT NULL)
*
* Requires: @ns lock be held
*
* Walk the labelset ensuring that all labels are up to date and valid
* Any label that has a stale component is marked stale and replaced
* by an updated version.
*
* If failures happen due to memory pressure then stale labels will
* be left in place until the next pass.
*/
static void __labelset_update(struct aa_ns *ns)
{
struct aa_label *label;
AA_BUG(!ns);
AA_BUG(!mutex_is_locked(&ns->lock));
do {
label = labelset_next_stale(&ns->labels);
if (label) {
struct aa_label *l = __label_update(label);
aa_put_label(l);
aa_put_label(label);
}
} while (label);
}
/**
* __aa_labelset_update_subtree - update all labels with a stale component
* @ns: ns to start update at (NOT NULL)
*
* Requires: @ns lock be held
*
* Updates labels that have stale components in @ns and any child namespaces.
*/
void __aa_labelset_update_subtree(struct aa_ns *ns)
{
struct aa_ns *child;
AA_BUG(!ns);
AA_BUG(!mutex_is_locked(&ns->lock));
__labelset_update(ns);
list_for_each_entry(child, &ns->sub_ns, base.list) {
mutex_lock_nested(&child->lock, child->level);
__aa_labelset_update_subtree(child);
mutex_unlock(&child->lock);
}
}
| linux-master | security/apparmor/label.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* AppArmor security module
*
* This file contains AppArmor auditing functions
*
* Copyright (C) 1998-2008 Novell/SUSE
* Copyright 2009-2010 Canonical Ltd.
*/
#include <linux/audit.h>
#include <linux/socket.h>
#include "include/apparmor.h"
#include "include/audit.h"
#include "include/policy.h"
#include "include/policy_ns.h"
#include "include/secid.h"
const char *const audit_mode_names[] = {
"normal",
"quiet_denied",
"quiet",
"noquiet",
"all"
};
static const char *const aa_audit_type[] = {
"AUDIT",
"ALLOWED",
"DENIED",
"HINT",
"STATUS",
"ERROR",
"KILLED",
"AUTO"
};
static const char *const aa_class_names[] = {
"none",
"unknown",
"file",
"cap",
"net",
"rlimits",
"domain",
"mount",
"unknown",
"ptrace",
"signal",
"xmatch",
"unknown",
"unknown",
"net",
"unknown",
"label",
"posix_mqueue",
"io_uring",
"module",
"lsm",
"unknown",
"unknown",
"unknown",
"unknown",
"unknown",
"unknown",
"unknown",
"unknown",
"unknown",
"unknown",
"X",
"dbus",
};
/*
* Currently AppArmor auditing is fed straight into the audit framework.
*
* TODO:
* netlink interface for complain mode
* user auditing, - send user auditing to netlink interface
* system control of whether user audit messages go to system log
*/
/**
* audit_pre - audit callback for common AppArmor audit fields
* @ab: audit buffer to fill (NOT NULL)
* @ca: audit structure containing data to audit (NOT NULL)
*
* Record common AppArmor audit data from @ca
*/
static void audit_pre(struct audit_buffer *ab, void *ca)
{
struct common_audit_data *sa = ca;
if (aa_g_audit_header) {
audit_log_format(ab, "apparmor=\"%s\"",
aa_audit_type[aad(sa)->type]);
}
if (aad(sa)->op) {
audit_log_format(ab, " operation=\"%s\"", aad(sa)->op);
}
if (aad(sa)->class)
audit_log_format(ab, " class=\"%s\"",
aad(sa)->class <= AA_CLASS_LAST ?
aa_class_names[aad(sa)->class] :
"unknown");
if (aad(sa)->info) {
audit_log_format(ab, " info=\"%s\"", aad(sa)->info);
if (aad(sa)->error)
audit_log_format(ab, " error=%d", aad(sa)->error);
}
if (aad(sa)->label) {
struct aa_label *label = aad(sa)->label;
if (label_isprofile(label)) {
struct aa_profile *profile = labels_profile(label);
if (profile->ns != root_ns) {
audit_log_format(ab, " namespace=");
audit_log_untrustedstring(ab,
profile->ns->base.hname);
}
audit_log_format(ab, " profile=");
audit_log_untrustedstring(ab, profile->base.hname);
} else {
audit_log_format(ab, " label=");
aa_label_xaudit(ab, root_ns, label, FLAG_VIEW_SUBNS,
GFP_ATOMIC);
}
}
if (aad(sa)->name) {
audit_log_format(ab, " name=");
audit_log_untrustedstring(ab, aad(sa)->name);
}
}
/**
* aa_audit_msg - Log a message to the audit subsystem
* @type: audit type for the message
* @sa: audit event structure (NOT NULL)
* @cb: optional callback fn for type specific fields (MAYBE NULL)
*/
void aa_audit_msg(int type, struct common_audit_data *sa,
void (*cb) (struct audit_buffer *, void *))
{
aad(sa)->type = type;
common_lsm_audit(sa, audit_pre, cb);
}
/**
* aa_audit - Log a profile based audit event to the audit subsystem
* @type: audit type for the message
* @profile: profile to check against (NOT NULL)
* @sa: audit event (NOT NULL)
* @cb: optional callback fn for type specific fields (MAYBE NULL)
*
* Handle default message switching based off of audit mode flags
*
* Returns: error on failure
*/
int aa_audit(int type, struct aa_profile *profile, struct common_audit_data *sa,
void (*cb) (struct audit_buffer *, void *))
{
AA_BUG(!profile);
if (type == AUDIT_APPARMOR_AUTO) {
if (likely(!aad(sa)->error)) {
if (AUDIT_MODE(profile) != AUDIT_ALL)
return 0;
type = AUDIT_APPARMOR_AUDIT;
} else if (COMPLAIN_MODE(profile))
type = AUDIT_APPARMOR_ALLOWED;
else
type = AUDIT_APPARMOR_DENIED;
}
if (AUDIT_MODE(profile) == AUDIT_QUIET ||
(type == AUDIT_APPARMOR_DENIED &&
AUDIT_MODE(profile) == AUDIT_QUIET_DENIED))
return aad(sa)->error;
if (KILL_MODE(profile) && type == AUDIT_APPARMOR_DENIED)
type = AUDIT_APPARMOR_KILL;
aad(sa)->label = &profile->label;
aa_audit_msg(type, sa, cb);
if (aad(sa)->type == AUDIT_APPARMOR_KILL)
(void)send_sig_info(SIGKILL, NULL,
sa->type == LSM_AUDIT_DATA_TASK && sa->u.tsk ?
sa->u.tsk : current);
if (aad(sa)->type == AUDIT_APPARMOR_ALLOWED)
return complain_error(aad(sa)->error);
return aad(sa)->error;
}
struct aa_audit_rule {
struct aa_label *label;
};
void aa_audit_rule_free(void *vrule)
{
struct aa_audit_rule *rule = vrule;
if (rule) {
if (!IS_ERR(rule->label))
aa_put_label(rule->label);
kfree(rule);
}
}
int aa_audit_rule_init(u32 field, u32 op, char *rulestr, void **vrule)
{
struct aa_audit_rule *rule;
switch (field) {
case AUDIT_SUBJ_ROLE:
if (op != Audit_equal && op != Audit_not_equal)
return -EINVAL;
break;
default:
return -EINVAL;
}
rule = kzalloc(sizeof(struct aa_audit_rule), GFP_KERNEL);
if (!rule)
return -ENOMEM;
/* Currently rules are treated as coming from the root ns */
rule->label = aa_label_parse(&root_ns->unconfined->label, rulestr,
GFP_KERNEL, true, false);
if (IS_ERR(rule->label)) {
int err = PTR_ERR(rule->label);
aa_audit_rule_free(rule);
return err;
}
*vrule = rule;
return 0;
}
int aa_audit_rule_known(struct audit_krule *rule)
{
int i;
for (i = 0; i < rule->field_count; i++) {
struct audit_field *f = &rule->fields[i];
switch (f->type) {
case AUDIT_SUBJ_ROLE:
return 1;
}
}
return 0;
}
int aa_audit_rule_match(u32 sid, u32 field, u32 op, void *vrule)
{
struct aa_audit_rule *rule = vrule;
struct aa_label *label;
int found = 0;
label = aa_secid_to_label(sid);
if (!label)
return -ENOENT;
if (aa_label_is_subset(label, rule->label))
found = 1;
switch (field) {
case AUDIT_SUBJ_ROLE:
switch (op) {
case Audit_equal:
return found;
case Audit_not_equal:
return !found;
}
}
return 0;
}
| linux-master | security/apparmor/audit.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* AppArmor security module
*
* This file contains AppArmor function for pathnames
*
* Copyright (C) 1998-2008 Novell/SUSE
* Copyright 2009-2010 Canonical Ltd.
*/
#include <linux/magic.h>
#include <linux/mount.h>
#include <linux/namei.h>
#include <linux/nsproxy.h>
#include <linux/path.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/fs_struct.h>
#include "include/apparmor.h"
#include "include/path.h"
#include "include/policy.h"
/* modified from dcache.c */
static int prepend(char **buffer, int buflen, const char *str, int namelen)
{
buflen -= namelen;
if (buflen < 0)
return -ENAMETOOLONG;
*buffer -= namelen;
memcpy(*buffer, str, namelen);
return 0;
}
#define CHROOT_NSCONNECT (PATH_CHROOT_REL | PATH_CHROOT_NSCONNECT)
/* If the path is not connected to the expected root,
* check if it is a sysctl and handle it specially, else remove any
* leading / that __d_path may have returned.
* Unless
* specifically directed to connect the path,
* OR
* if in a chroot and doing chroot relative paths and the path
* resolves to the namespace root (would be connected outside
* of chroot) and specifically directed to connect paths to
* namespace root.
*/
static int disconnect(const struct path *path, char *buf, char **name,
int flags, const char *disconnected)
{
int error = 0;
if (!(flags & PATH_CONNECT_PATH) &&
!(((flags & CHROOT_NSCONNECT) == CHROOT_NSCONNECT) &&
our_mnt(path->mnt))) {
/* disconnected path, don't return pathname starting
* with '/'
*/
error = -EACCES;
if (**name == '/')
*name = *name + 1;
} else {
if (**name != '/')
/* CONNECT_PATH with missing root */
error = prepend(name, *name - buf, "/", 1);
if (!error && disconnected)
error = prepend(name, *name - buf, disconnected,
strlen(disconnected));
}
return error;
}
/**
* d_namespace_path - lookup a name associated with a given path
* @path: path to lookup (NOT NULL)
* @buf: buffer to store path to (NOT NULL)
* @name: Returns - pointer for start of path name with in @buf (NOT NULL)
* @flags: flags controlling path lookup
* @disconnected: string to prefix to disconnected paths
*
* Handle path name lookup.
*
* Returns: %0 else error code if path lookup fails
* When no error the path name is returned in @name which points to
* a position in @buf
*/
static int d_namespace_path(const struct path *path, char *buf, char **name,
int flags, const char *disconnected)
{
char *res;
int error = 0;
int connected = 1;
int isdir = (flags & PATH_IS_DIR) ? 1 : 0;
int buflen = aa_g_path_max - isdir;
if (path->mnt->mnt_flags & MNT_INTERNAL) {
/* it's not mounted anywhere */
res = dentry_path(path->dentry, buf, buflen);
*name = res;
if (IS_ERR(res)) {
*name = buf;
return PTR_ERR(res);
}
if (path->dentry->d_sb->s_magic == PROC_SUPER_MAGIC &&
strncmp(*name, "/sys/", 5) == 0) {
/* TODO: convert over to using a per namespace
* control instead of hard coded /proc
*/
error = prepend(name, *name - buf, "/proc", 5);
goto out;
} else
error = disconnect(path, buf, name, flags,
disconnected);
goto out;
}
/* resolve paths relative to chroot?*/
if (flags & PATH_CHROOT_REL) {
struct path root;
get_fs_root(current->fs, &root);
res = __d_path(path, &root, buf, buflen);
path_put(&root);
} else {
res = d_absolute_path(path, buf, buflen);
if (!our_mnt(path->mnt))
connected = 0;
}
/* handle error conditions - and still allow a partial path to
* be returned.
*/
if (!res || IS_ERR(res)) {
if (PTR_ERR(res) == -ENAMETOOLONG) {
error = -ENAMETOOLONG;
*name = buf;
goto out;
}
connected = 0;
res = dentry_path_raw(path->dentry, buf, buflen);
if (IS_ERR(res)) {
error = PTR_ERR(res);
*name = buf;
goto out;
}
} else if (!our_mnt(path->mnt))
connected = 0;
*name = res;
if (!connected)
error = disconnect(path, buf, name, flags, disconnected);
/* Handle two cases:
* 1. A deleted dentry && profile is not allowing mediation of deleted
* 2. On some filesystems, newly allocated dentries appear to the
* security_path hooks as a deleted dentry except without an inode
* allocated.
*/
if (d_unlinked(path->dentry) && d_is_positive(path->dentry) &&
!(flags & (PATH_MEDIATE_DELETED | PATH_DELEGATE_DELETED))) {
error = -ENOENT;
goto out;
}
out:
/*
* Append "/" to the pathname. The root directory is a special
* case; it already ends in slash.
*/
if (!error && isdir && ((*name)[1] != '\0' || (*name)[0] != '/'))
strcpy(&buf[aa_g_path_max - 2], "/");
return error;
}
/**
* aa_path_name - get the pathname to a buffer ensure dir / is appended
* @path: path the file (NOT NULL)
* @flags: flags controlling path name generation
* @buffer: buffer to put name in (NOT NULL)
* @name: Returns - the generated path name if !error (NOT NULL)
* @info: Returns - information on why the path lookup failed (MAYBE NULL)
* @disconnected: string to prepend to disconnected paths
*
* @name is a pointer to the beginning of the pathname (which usually differs
* from the beginning of the buffer), or NULL. If there is an error @name
* may contain a partial or invalid name that can be used for audit purposes,
* but it can not be used for mediation.
*
* We need PATH_IS_DIR to indicate whether the file is a directory or not
* because the file may not yet exist, and so we cannot check the inode's
* file type.
*
* Returns: %0 else error code if the name could not be retrieved
*/
int aa_path_name(const struct path *path, int flags, char *buffer,
const char **name, const char **info, const char *disconnected)
{
char *str = NULL;
int error = d_namespace_path(path, buffer, &str, flags, disconnected);
if (info && error) {
if (error == -ENOENT)
*info = "Failed name lookup - deleted entry";
else if (error == -EACCES)
*info = "Failed name lookup - disconnected path";
else if (error == -ENAMETOOLONG)
*info = "Failed name lookup - name too long";
else
*info = "Failed name lookup";
}
*name = str;
return error;
}
| linux-master | security/apparmor/path.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* AppArmor security module
*
* This file contains AppArmor network mediation
*
* Copyright (C) 1998-2008 Novell/SUSE
* Copyright 2009-2017 Canonical Ltd.
*/
#include "include/apparmor.h"
#include "include/audit.h"
#include "include/cred.h"
#include "include/label.h"
#include "include/net.h"
#include "include/policy.h"
#include "include/secid.h"
#include "net_names.h"
struct aa_sfs_entry aa_sfs_entry_network[] = {
AA_SFS_FILE_STRING("af_mask", AA_SFS_AF_MASK),
{ }
};
static const char * const net_mask_names[] = {
"unknown",
"send",
"receive",
"unknown",
"create",
"shutdown",
"connect",
"unknown",
"setattr",
"getattr",
"setcred",
"getcred",
"chmod",
"chown",
"chgrp",
"lock",
"mmap",
"mprot",
"unknown",
"unknown",
"accept",
"bind",
"listen",
"unknown",
"setopt",
"getopt",
"unknown",
"unknown",
"unknown",
"unknown",
"unknown",
"unknown",
};
/* audit callback for net specific fields */
void audit_net_cb(struct audit_buffer *ab, void *va)
{
struct common_audit_data *sa = va;
if (address_family_names[sa->u.net->family])
audit_log_format(ab, " family=\"%s\"",
address_family_names[sa->u.net->family]);
else
audit_log_format(ab, " family=\"unknown(%d)\"",
sa->u.net->family);
if (sock_type_names[aad(sa)->net.type])
audit_log_format(ab, " sock_type=\"%s\"",
sock_type_names[aad(sa)->net.type]);
else
audit_log_format(ab, " sock_type=\"unknown(%d)\"",
aad(sa)->net.type);
audit_log_format(ab, " protocol=%d", aad(sa)->net.protocol);
if (aad(sa)->request & NET_PERMS_MASK) {
audit_log_format(ab, " requested_mask=");
aa_audit_perm_mask(ab, aad(sa)->request, NULL, 0,
net_mask_names, NET_PERMS_MASK);
if (aad(sa)->denied & NET_PERMS_MASK) {
audit_log_format(ab, " denied_mask=");
aa_audit_perm_mask(ab, aad(sa)->denied, NULL, 0,
net_mask_names, NET_PERMS_MASK);
}
}
if (aad(sa)->peer) {
audit_log_format(ab, " peer=");
aa_label_xaudit(ab, labels_ns(aad(sa)->label), aad(sa)->peer,
FLAGS_NONE, GFP_ATOMIC);
}
}
/* Generic af perm */
int aa_profile_af_perm(struct aa_profile *profile, struct common_audit_data *sa,
u32 request, u16 family, int type)
{
struct aa_ruleset *rules = list_first_entry(&profile->rules,
typeof(*rules), list);
struct aa_perms perms = { };
aa_state_t state;
__be16 buffer[2];
AA_BUG(family >= AF_MAX);
AA_BUG(type < 0 || type >= SOCK_MAX);
if (profile_unconfined(profile))
return 0;
state = RULE_MEDIATES(rules, AA_CLASS_NET);
if (!state)
return 0;
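/*
 * The AF mediation DFA consumes a fixed 4-byte input: the address family
 * followed by the socket type, each as a big-endian 16-bit value.
 */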
buffer[0] = cpu_to_be16(family);
buffer[1] = cpu_to_be16((u16) type);
state = aa_dfa_match_len(rules->policy.dfa, state, (char *) &buffer,
4);
perms = *aa_lookup_perms(&rules->policy, state);
aa_apply_modes_to_perms(profile, &perms);
return aa_check_perms(profile, &perms, request, sa, audit_net_cb);
}
int aa_af_perm(struct aa_label *label, const char *op, u32 request, u16 family,
int type, int protocol)
{
struct aa_profile *profile;
DEFINE_AUDIT_NET(sa, op, NULL, family, type, protocol);
return fn_for_each_confined(label, profile,
aa_profile_af_perm(profile, &sa, request, family,
type));
}
static int aa_label_sk_perm(struct aa_label *label, const char *op, u32 request,
struct sock *sk)
{
struct aa_sk_ctx *ctx = SK_CTX(sk);
int error = 0;
AA_BUG(!label);
AA_BUG(!sk);
if (ctx->label != kernel_t && !unconfined(label)) {
struct aa_profile *profile;
DEFINE_AUDIT_SK(sa, op, sk);
error = fn_for_each_confined(label, profile,
aa_profile_af_sk_perm(profile, &sa, request, sk));
}
return error;
}
int aa_sk_perm(const char *op, u32 request, struct sock *sk)
{
struct aa_label *label;
int error;
AA_BUG(!sk);
AA_BUG(in_interrupt());
/* TODO: switch to begin_current_label ???? */
label = begin_current_label_crit_section();
error = aa_label_sk_perm(label, op, request, sk);
end_current_label_crit_section(label);
return error;
}
int aa_sock_file_perm(struct aa_label *label, const char *op, u32 request,
struct socket *sock)
{
AA_BUG(!label);
AA_BUG(!sock);
AA_BUG(!sock->sk);
return aa_label_sk_perm(label, op, request, sock->sk);
}
#ifdef CONFIG_NETWORK_SECMARK
static int apparmor_secmark_init(struct aa_secmark *secmark)
{
struct aa_label *label;
if (secmark->label[0] == '*') {
secmark->secid = AA_SECID_WILDCARD;
return 0;
}
label = aa_label_strn_parse(&root_ns->unconfined->label,
secmark->label, strlen(secmark->label),
GFP_ATOMIC, false, false);
if (IS_ERR(label))
return PTR_ERR(label);
secmark->secid = label->secid;
return 0;
}
static int aa_secmark_perm(struct aa_profile *profile, u32 request, u32 secid,
struct common_audit_data *sa)
{
int i, ret;
struct aa_perms perms = { };
struct aa_ruleset *rules = list_first_entry(&profile->rules,
typeof(*rules), list);
if (rules->secmark_count == 0)
return 0;
for (i = 0; i < rules->secmark_count; i++) {
if (!rules->secmark[i].secid) {
ret = apparmor_secmark_init(&rules->secmark[i]);
if (ret)
return ret;
}
if (rules->secmark[i].secid == secid ||
rules->secmark[i].secid == AA_SECID_WILDCARD) {
if (rules->secmark[i].deny)
perms.deny = ALL_PERMS_MASK;
else
perms.allow = ALL_PERMS_MASK;
if (rules->secmark[i].audit)
perms.audit = ALL_PERMS_MASK;
}
}
aa_apply_modes_to_perms(profile, &perms);
return aa_check_perms(profile, &perms, request, sa, audit_net_cb);
}
int apparmor_secmark_check(struct aa_label *label, char *op, u32 request,
u32 secid, const struct sock *sk)
{
struct aa_profile *profile;
DEFINE_AUDIT_SK(sa, op, sk);
return fn_for_each_confined(label, profile,
aa_secmark_perm(profile, request, secid,
&sa));
}
#endif
| linux-master | security/apparmor/net.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* AppArmor security module
*
* This file contains AppArmor policy manipulation functions
*
* Copyright (C) 1998-2008 Novell/SUSE
* Copyright 2009-2010 Canonical Ltd.
*
* AppArmor policy is based around profiles, which contain the rules a
* task is confined by. Every task in the system has a profile attached
* to it determined either by matching "unconfined" tasks against the
* visible set of profiles or by following a profiles attachment rules.
*
* Each profile exists in a profile namespace which is a container of
* visible profiles. Each namespace contains a special "unconfined" profile,
* which doesn't enforce any confinement on a task beyond DAC.
*
* Namespace and profile names can be written together in either
* of two syntaxes.
* :namespace:profile - used by kernel interfaces for easy detection
* namespace://profile - used by policy
*
* Profile names can not start with : or @ or ^ and may not contain \0
*
* Reserved profile names
* unconfined - special automatically generated unconfined profile
* inherit - special name to indicate profile inheritance
* null-XXXX-YYYY - special automatically generated learning profiles
*
* Namespace names may not start with / or @ and may not contain \0 or :
* Reserved namespace names
* user-XXXX - user defined profiles
*
* a // in a profile or namespace name indicates a hierarchical name with the
* name before the // being the parent and the name after the child.
*
* Profile and namespace hierarchies serve two different but similar purposes.
* The namespace contains the set of visible profiles that are considered
* for attachment. The hierarchy of namespaces allows for virtualizing
* the namespace so that for example a chroot can have its own set of profiles
* which may define some local user namespaces.
* The profile hierarchy serves two distinct purposes,
* - it allows for sub profiles or hats, which allows an application to run
* subprograms under its own profile with different restrictions than
* itself, and not have it use the system profile.
* eg. if a mail program starts an editor, the policy might make the
* restrictions on the editor tighter than those on the mail program,
* and definitely different from general editor restrictions
* - it allows for binary hierarchy of profiles, so that execution history
* is preserved. This feature isn't exploited by AppArmor reference policy
* but is allowed. NOTE: this is currently suboptimal because profile
* aliasing is not currently implemented so that a profile for each
* level must be defined.
* eg. /bin/bash///bin/ls as a name would indicate /bin/ls was started
* from /bin/bash
*
* A profile or namespace name that can contain one or more // separators
* is referred to as an hname (hierarchical).
* eg. /bin/bash//bin/ls
*
* An fqname is a name that may contain both namespace and profile hnames.
* eg. :ns:/bin/bash//bin/ls
*
* NOTES:
* - locking of profile lists is currently fairly coarse. All profile
* lists within a namespace use the namespace lock.
* FIXME: move profile lists to using rcu_lists
*/
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/cred.h>
#include <linux/rculist.h>
#include <linux/user_namespace.h>
#include "include/apparmor.h"
#include "include/capability.h"
#include "include/cred.h"
#include "include/file.h"
#include "include/ipc.h"
#include "include/match.h"
#include "include/path.h"
#include "include/policy.h"
#include "include/policy_ns.h"
#include "include/policy_unpack.h"
#include "include/resource.h"
int unprivileged_userns_apparmor_policy = 1;
const char *const aa_profile_mode_names[] = {
"enforce",
"complain",
"kill",
"unconfined",
"user",
};
/**
* __add_profile - add a profile to list and label tree
* @list: list to add it to (NOT NULL)
* @profile: the profile to add (NOT NULL)
*
* refcount @profile, should be put by __list_remove_profile
*
* Requires: namespace lock be held, or list not be shared
*/
static void __add_profile(struct list_head *list, struct aa_profile *profile)
{
struct aa_label *l;
AA_BUG(!list);
AA_BUG(!profile);
AA_BUG(!profile->ns);
AA_BUG(!mutex_is_locked(&profile->ns->lock));
list_add_rcu(&profile->base.list, list);
/* get list reference */
aa_get_profile(profile);
l = aa_label_insert(&profile->ns->labels, &profile->label);
AA_BUG(l != &profile->label);
aa_put_label(l);
}
/**
* __list_remove_profile - remove a profile from the list it is on
* @profile: the profile to remove (NOT NULL)
*
* remove a profile from the list, warning generally removal should
* be done with __replace_profile as most profile removals are
* replacements to the unconfined profile.
*
* put @profile list refcount
*
* Requires: namespace lock be held, or list not have been live
*/
static void __list_remove_profile(struct aa_profile *profile)
{
AA_BUG(!profile);
AA_BUG(!profile->ns);
AA_BUG(!mutex_is_locked(&profile->ns->lock));
list_del_rcu(&profile->base.list);
aa_put_profile(profile);
}
/**
* __remove_profile - remove old profile, and children
* @profile: profile to be replaced (NOT NULL)
*
* Requires: namespace list lock be held, or list not be shared
*/
static void __remove_profile(struct aa_profile *profile)
{
AA_BUG(!profile);
AA_BUG(!profile->ns);
AA_BUG(!mutex_is_locked(&profile->ns->lock));
/* release any children lists first */
__aa_profile_list_release(&profile->base.profiles);
/* released by free_profile */
aa_label_remove(&profile->label);
__aafs_profile_rmdir(profile);
__list_remove_profile(profile);
}
/**
* __aa_profile_list_release - remove all profiles on the list and put refs
* @head: list of profiles (NOT NULL)
*
* Requires: namespace lock be held
*/
void __aa_profile_list_release(struct list_head *head)
{
struct aa_profile *profile, *tmp;
list_for_each_entry_safe(profile, tmp, head, base.list)
__remove_profile(profile);
}
/**
* aa_free_data - free a data blob
* @ptr: data to free
* @arg: unused
*/
static void aa_free_data(void *ptr, void *arg)
{
struct aa_data *data = ptr;
kfree_sensitive(data->data);
kfree_sensitive(data->key);
kfree_sensitive(data);
}
static void free_attachment(struct aa_attachment *attach)
{
int i;
for (i = 0; i < attach->xattr_count; i++)
kfree_sensitive(attach->xattrs[i]);
kfree_sensitive(attach->xattrs);
aa_destroy_policydb(&attach->xmatch);
}
static void free_ruleset(struct aa_ruleset *rules)
{
int i;
aa_destroy_policydb(&rules->file);
aa_destroy_policydb(&rules->policy);
aa_free_cap_rules(&rules->caps);
aa_free_rlimit_rules(&rules->rlimits);
for (i = 0; i < rules->secmark_count; i++)
kfree_sensitive(rules->secmark[i].label);
kfree_sensitive(rules->secmark);
kfree_sensitive(rules);
}
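/**
 * aa_alloc_ruleset - allocate and initialize an empty ruleset
 * @gfp: allocation type
 *
 * Returns: new ruleset with an initialized list head, else NULL on failure
 */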
struct aa_ruleset *aa_alloc_ruleset(gfp_t gfp)
{
struct aa_ruleset *rules;
rules = kzalloc(sizeof(*rules), gfp);
if (rules)
INIT_LIST_HEAD(&rules->list);
return rules;
}
/**
* aa_free_profile - free a profile
* @profile: the profile to free (MAYBE NULL)
*
* Free a profile, its hats and null_profile. All references to the profile,
* its hats and null_profile must have been put.
*
* If the profile was referenced from a task context, free_profile() will
* be called from an rcu callback routine, so we must not sleep here.
*/
void aa_free_profile(struct aa_profile *profile)
{
struct aa_ruleset *rule, *tmp;
struct rhashtable *rht;
AA_DEBUG("%s(%p)\n", __func__, profile);
if (!profile)
return;
/* free children profiles */
aa_policy_destroy(&profile->base);
aa_put_profile(rcu_access_pointer(profile->parent));
aa_put_ns(profile->ns);
kfree_sensitive(profile->rename);
free_attachment(&profile->attach);
/*
* at this point there are no tasks that can have a reference
* to rules
*/
list_for_each_entry_safe(rule, tmp, &profile->rules, list) {
list_del_init(&rule->list);
free_ruleset(rule);
}
kfree_sensitive(profile->dirname);
if (profile->data) {
rht = profile->data;
profile->data = NULL;
rhashtable_free_and_destroy(rht, aa_free_data, NULL);
kfree_sensitive(rht);
}
kfree_sensitive(profile->hash);
aa_put_loaddata(profile->rawdata);
aa_label_destroy(&profile->label);
kfree_sensitive(profile);
}
/**
* aa_alloc_profile - allocate, initialize and return a new profile
* @hname: name of the profile (NOT NULL)
* @proxy: proxy to use OR NULL if a new one should be allocated
* @gfp: allocation type
*
* Returns: refcounted profile or NULL on failure
*/
struct aa_profile *aa_alloc_profile(const char *hname, struct aa_proxy *proxy,
gfp_t gfp)
{
struct aa_profile *profile;
struct aa_ruleset *rules;
/* freed by free_profile - usually through aa_put_profile */
profile = kzalloc(struct_size(profile, label.vec, 2), gfp);
if (!profile)
return NULL;
if (!aa_policy_init(&profile->base, NULL, hname, gfp))
goto fail;
if (!aa_label_init(&profile->label, 1, gfp))
goto fail;
INIT_LIST_HEAD(&profile->rules);
/* allocate the first ruleset, but leave it empty */
rules = aa_alloc_ruleset(gfp);
if (!rules)
goto fail;
list_add(&rules->list, &profile->rules);
/* update being set needed by fs interface */
if (!proxy) {
proxy = aa_alloc_proxy(&profile->label, gfp);
if (!proxy)
goto fail;
} else
aa_get_proxy(proxy);
profile->label.proxy = proxy;
profile->label.hname = profile->base.hname;
profile->label.flags |= FLAG_PROFILE;
profile->label.vec[0] = profile;
/* refcount released by caller */
return profile;
fail:
aa_free_profile(profile);
return NULL;
}
/* TODO: profile accounting - setup in remove */
/**
* __strn_find_child - find a profile on @head list using substring of @name
* @head: list to search (NOT NULL)
* @name: name of profile (NOT NULL)
* @len: length of @name substring to match
*
* Requires: rcu_read_lock be held
*
* Returns: unrefcounted profile ptr, or NULL if not found
*/
static struct aa_profile *__strn_find_child(struct list_head *head,
const char *name, int len)
{
return (struct aa_profile *)__policy_strn_find(head, name, len);
}
/**
* __find_child - find a profile on @head list with a name matching @name
* @head: list to search (NOT NULL)
* @name: name of profile (NOT NULL)
*
* Requires: rcu_read_lock be held
*
* Returns: unrefcounted profile ptr, or NULL if not found
*/
static struct aa_profile *__find_child(struct list_head *head, const char *name)
{
return __strn_find_child(head, name, strlen(name));
}
/**
* aa_find_child - find a profile by @name in @parent
* @parent: profile to search (NOT NULL)
* @name: profile name to search for (NOT NULL)
*
* Returns: a refcounted profile or NULL if not found
*/
struct aa_profile *aa_find_child(struct aa_profile *parent, const char *name)
{
struct aa_profile *profile;
rcu_read_lock();
do {
profile = __find_child(&parent->base.profiles, name);
} while (profile && !aa_get_profile_not0(profile));
rcu_read_unlock();
/* refcount released by caller */
return profile;
}
/**
* __lookup_parent - lookup the parent of a profile of name @hname
* @ns: namespace to lookup profile in (NOT NULL)
* @hname: hierarchical profile name to find parent of (NOT NULL)
*
* Looks up the parent of a fully qualified profile name; the profile
* that matches hname does not need to exist, in general this
* is used to load a new profile.
*
* Requires: rcu_read_lock be held
*
* Returns: unrefcounted policy or NULL if not found
*/
static struct aa_policy *__lookup_parent(struct aa_ns *ns,
const char *hname)
{
struct aa_policy *policy;
struct aa_profile *profile = NULL;
char *split;
policy = &ns->base;
for (split = strstr(hname, "//"); split;) {
profile = __strn_find_child(&policy->profiles, hname,
split - hname);
if (!profile)
return NULL;
policy = &profile->base;
hname = split + 2;
split = strstr(hname, "//");
}
if (!profile)
return &ns->base;
return &profile->base;
}
/**
* __create_missing_ancestors - create placeholders for missing ancestors
* @ns: namespace to lookup profile in (NOT NULL)
* @hname: hierarchical profile name to find parent of (NOT NULL)
* @gfp: type of allocation.
*
* Requires: ns mutex lock held
*
* Return: unrefcounted parent policy on success or %NULL if error creating
* place holder profiles.
*/
static struct aa_policy *__create_missing_ancestors(struct aa_ns *ns,
const char *hname,
gfp_t gfp)
{
struct aa_policy *policy;
struct aa_profile *parent, *profile = NULL;
char *split;
AA_BUG(!ns);
AA_BUG(!hname);
policy = &ns->base;
for (split = strstr(hname, "//"); split;) {
parent = profile;
profile = __strn_find_child(&policy->profiles, hname,
split - hname);
if (!profile) {
const char *name = kstrndup(hname, split - hname,
gfp);
if (!name)
return NULL;
profile = aa_alloc_null(parent, name, gfp);
kfree(name);
if (!profile)
return NULL;
if (!parent)
profile->ns = aa_get_ns(ns);
}
policy = &profile->base;
hname = split + 2;
split = strstr(hname, "//");
}
if (!profile)
return &ns->base;
return &profile->base;
}
/**
* __lookupn_profile - lookup the profile matching @hname
* @base: base list to start looking up profile name from (NOT NULL)
* @hname: hierarchical profile name (NOT NULL)
* @n: length of @hname
*
* Requires: rcu_read_lock be held
*
* Returns: unrefcounted profile pointer or NULL if not found
*
* Do a relative name lookup, recursing through profile tree.
*/
static struct aa_profile *__lookupn_profile(struct aa_policy *base,
const char *hname, size_t n)
{
struct aa_profile *profile = NULL;
const char *split;
for (split = strnstr(hname, "//", n); split;
split = strnstr(hname, "//", n)) {
profile = __strn_find_child(&base->profiles, hname,
split - hname);
if (!profile)
return NULL;
base = &profile->base;
n -= split + 2 - hname;
hname = split + 2;
}
if (n)
return __strn_find_child(&base->profiles, hname, n);
return NULL;
}
static struct aa_profile *__lookup_profile(struct aa_policy *base,
const char *hname)
{
return __lookupn_profile(base, hname, strlen(hname));
}
/**
* aa_lookupn_profile - find a profile by its full or partial name
* @ns: the namespace to start from (NOT NULL)
* @hname: name to do lookup on. Does not contain namespace prefix (NOT NULL)
* @n: size of @hname
*
* Returns: refcounted profile or NULL if not found
*/
struct aa_profile *aa_lookupn_profile(struct aa_ns *ns, const char *hname,
size_t n)
{
struct aa_profile *profile;
rcu_read_lock();
do {
profile = __lookupn_profile(&ns->base, hname, n);
} while (profile && !aa_get_profile_not0(profile));
rcu_read_unlock();
/* the unconfined profile is not in the regular profile list */
if (!profile && strncmp(hname, "unconfined", n) == 0)
profile = aa_get_newest_profile(ns->unconfined);
/* refcount released by caller */
return profile;
}
struct aa_profile *aa_lookup_profile(struct aa_ns *ns, const char *hname)
{
return aa_lookupn_profile(ns, hname, strlen(hname));
}
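/**
 * aa_fqlookupn_profile - lookup a profile by its fully qualified name
 * @base: label to use as the namespace/visibility base (NOT NULL)
 * @fqname: name which may begin with a ":ns:" namespace prefix (NOT NULL)
 * @n: length of @fqname
 *
 * Returns: refcounted profile else NULL if not found
 */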
struct aa_profile *aa_fqlookupn_profile(struct aa_label *base,
const char *fqname, size_t n)
{
struct aa_profile *profile;
struct aa_ns *ns;
const char *name, *ns_name;
size_t ns_len;
name = aa_splitn_fqname(fqname, n, &ns_name, &ns_len);
if (ns_name) {
ns = aa_lookupn_ns(labels_ns(base), ns_name, ns_len);
if (!ns)
return NULL;
} else
ns = aa_get_ns(labels_ns(base));
if (name)
profile = aa_lookupn_profile(ns, name, n - (name - fqname));
else if (ns)
/* default profile for ns, currently unconfined */
profile = aa_get_newest_profile(ns->unconfined);
else
profile = NULL;
aa_put_ns(ns);
return profile;
}
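/**
 * aa_alloc_null - allocate a null profile with no permissions
 * @parent: profile the null profile is created under (MAYBE NULL)
 * @name: hname for the new profile (NOT NULL)
 * @gfp: allocation type
 *
 * Returns: refcounted profile else NULL on failure
 */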
struct aa_profile *aa_alloc_null(struct aa_profile *parent, const char *name,
gfp_t gfp)
{
struct aa_profile *profile;
struct aa_ruleset *rules;
profile = aa_alloc_profile(name, NULL, gfp);
if (!profile)
return NULL;
/* TODO: ideally we should inherit abi from parent */
profile->label.flags |= FLAG_NULL;
rules = list_first_entry(&profile->rules, typeof(*rules), list);
rules->file.dfa = aa_get_dfa(nulldfa);
rules->file.perms = kcalloc(2, sizeof(struct aa_perms), GFP_KERNEL);
if (!rules->file.perms)
goto fail;
rules->file.size = 2;
rules->policy.dfa = aa_get_dfa(nulldfa);
rules->policy.perms = kcalloc(2, sizeof(struct aa_perms), GFP_KERNEL);
if (!rules->policy.perms)
goto fail;
rules->policy.size = 2;
if (parent) {
profile->path_flags = parent->path_flags;
/* released on free_profile */
rcu_assign_pointer(profile->parent, aa_get_profile(parent));
profile->ns = aa_get_ns(parent->ns);
}
return profile;
fail:
aa_free_profile(profile);
return NULL;
}
/**
* aa_new_learning_profile - create or find a null-X learning profile
* @parent: profile that caused this profile to be created (NOT NULL)
* @hat: true if the null- learning profile is a hat
* @base: name to base the null profile off of
* @gfp: type of allocation
*
* Find/Create a null- complain mode profile used in learning mode. The
* name of the profile is unique and follows the format of parent//null-XXX,
* where XXX is based on @base or, if that fails or is not supplied,
* a unique number
*
* null profiles are added to the profile list but the list does not
* hold a count on them so that they are automatically released when
* not in use.
*
* Returns: new refcounted profile else NULL on failure
*/
struct aa_profile *aa_new_learning_profile(struct aa_profile *parent, bool hat,
const char *base, gfp_t gfp)
{
struct aa_profile *p, *profile;
const char *bname;
char *name = NULL;
AA_BUG(!parent);
if (base) {
name = kmalloc(strlen(parent->base.hname) + 8 + strlen(base),
gfp);
if (name) {
sprintf(name, "%s//null-%s", parent->base.hname, base);
goto name;
}
/* fall through to try shorter uniq */
}
name = kmalloc(strlen(parent->base.hname) + 2 + 7 + 8, gfp);
if (!name)
return NULL;
sprintf(name, "%s//null-%x", parent->base.hname,
atomic_inc_return(&parent->ns->uniq_null));
name:
/* lookup to see if this is a dup creation */
bname = basename(name);
profile = aa_find_child(parent, bname);
if (profile)
goto out;
profile = aa_alloc_null(parent, name, gfp);
if (!profile)
goto fail;
profile->mode = APPARMOR_COMPLAIN;
if (hat)
profile->label.flags |= FLAG_HAT;
mutex_lock_nested(&profile->ns->lock, profile->ns->level);
p = __find_child(&parent->base.profiles, bname);
if (p) {
aa_free_profile(profile);
profile = aa_get_profile(p);
} else {
__add_profile(&parent->base.profiles, profile);
}
mutex_unlock(&profile->ns->lock);
/* refcount released by caller */
out:
kfree(name);
return profile;
fail:
kfree(name);
aa_free_profile(profile);
return NULL;
}
/**
* replacement_allowed - test to see if replacement is allowed
* @profile: profile to test if it can be replaced (MAYBE NULL)
* @noreplace: true if replacement shouldn't be allowed but addition is okay
* @info: Returns - info about why replacement failed (NOT NULL)
*
* Returns: %0 if replacement allowed else error code
*/
static int replacement_allowed(struct aa_profile *profile, int noreplace,
const char **info)
{
if (profile) {
if (profile->label.flags & FLAG_IMMUTIBLE) {
*info = "cannot replace immutable profile";
return -EPERM;
} else if (noreplace) {
*info = "profile already exists";
return -EEXIST;
}
}
return 0;
}
/* audit callback for policy specific fields */
static void audit_cb(struct audit_buffer *ab, void *va)
{
struct common_audit_data *sa = va;
if (aad(sa)->iface.ns) {
audit_log_format(ab, " ns=");
audit_log_untrustedstring(ab, aad(sa)->iface.ns);
}
}
/**
* audit_policy - Do auditing of policy changes
* @label: label to check if it can manage policy
* @op: policy operation being performed
* @ns_name: name of namespace being manipulated
* @name: name of profile being manipulated (NOT NULL)
* @info: any extra information to be audited (MAYBE NULL)
* @error: error code
*
* Returns: the error to be returned after audit is done
*/
static int audit_policy(struct aa_label *label, const char *op,
const char *ns_name, const char *name,
const char *info, int error)
{
DEFINE_AUDIT_DATA(sa, LSM_AUDIT_DATA_NONE, AA_CLASS_NONE, op);
aad(&sa)->iface.ns = ns_name;
aad(&sa)->name = name;
aad(&sa)->info = info;
aad(&sa)->error = error;
aad(&sa)->label = label;
aa_audit_msg(AUDIT_APPARMOR_STATUS, &sa, audit_cb);
return error;
}
/* don't call out to other LSMs in the stack for apparmor policy admin
* permissions
*/
static int policy_ns_capable(struct aa_label *label,
struct user_namespace *userns, int cap)
{
int err;
/* check for MAC_ADMIN cap in cred */
err = cap_capable(current_cred(), userns, cap, CAP_OPT_NONE);
if (!err)
err = aa_capable(label, cap, CAP_OPT_NONE);
return err;
}
/**
* aa_policy_view_capable - check if viewing policy at @ns is allowed
* @label: label that is trying to view policy in ns
* @ns: namespace being viewed by @label (may be NULL if @label's ns)
*
* Returns: true if viewing policy is allowed
*
* If @ns is NULL then the namespace being viewed is assumed to be the
* task's current namespace.
*/
bool aa_policy_view_capable(struct aa_label *label, struct aa_ns *ns)
{
struct user_namespace *user_ns = current_user_ns();
struct aa_ns *view_ns = labels_view(label);
bool root_in_user_ns = uid_eq(current_euid(), make_kuid(user_ns, 0)) ||
in_egroup_p(make_kgid(user_ns, 0));
bool response = false;
if (!ns)
ns = view_ns;
if (root_in_user_ns && aa_ns_visible(view_ns, ns, true) &&
(user_ns == &init_user_ns ||
(unprivileged_userns_apparmor_policy != 0 &&
user_ns->level == view_ns->level)))
response = true;
return response;
}
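/**
 * aa_policy_admin_capable - check if managing policy at @ns is allowed
 * @label: label that is trying to manage policy
 * @ns: namespace being managed by @label (may be NULL if @label's ns)
 *
 * Returns: true if @label is allowed to manage policy in @ns
 */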
bool aa_policy_admin_capable(struct aa_label *label, struct aa_ns *ns)
{
struct user_namespace *user_ns = current_user_ns();
bool capable = policy_ns_capable(label, user_ns, CAP_MAC_ADMIN) == 0;
AA_DEBUG("cap_mac_admin? %d\n", capable);
AA_DEBUG("policy locked? %d\n", aa_g_lock_policy);
return aa_policy_view_capable(label, ns) && capable &&
!aa_g_lock_policy;
}
bool aa_current_policy_view_capable(struct aa_ns *ns)
{
struct aa_label *label;
bool res;
label = __begin_current_label_crit_section();
res = aa_policy_view_capable(label, ns);
__end_current_label_crit_section(label);
return res;
}
bool aa_current_policy_admin_capable(struct aa_ns *ns)
{
struct aa_label *label;
bool res;
label = __begin_current_label_crit_section();
res = aa_policy_admin_capable(label, ns);
__end_current_label_crit_section(label);
return res;
}
/**
* aa_may_manage_policy - can the current task manage policy
* @label: label to check if it can manage policy
* @mask: contains the policy manipulation operation being done
*
* Returns: 0 if the task is allowed to manipulate policy else error
*/
int aa_may_manage_policy(struct aa_label *label, struct aa_ns *ns, u32 mask)
{
const char *op;
if (mask & AA_MAY_REMOVE_POLICY)
op = OP_PROF_RM;
else if (mask & AA_MAY_REPLACE_POLICY)
op = OP_PROF_REPL;
else
op = OP_PROF_LOAD;
/* check if loading policy is locked out */
if (aa_g_lock_policy)
return audit_policy(label, op, NULL, NULL, "policy_locked",
-EACCES);
if (!aa_policy_admin_capable(label, ns))
return audit_policy(label, op, NULL, NULL, "not policy admin",
-EACCES);
/* TODO: add fine grained mediation of policy loads */
return 0;
}
static struct aa_profile *__list_lookup_parent(struct list_head *lh,
struct aa_profile *profile)
{
const char *base = basename(profile->base.hname);
long len = base - profile->base.hname;
struct aa_load_ent *ent;
/* parent won't have trailing // so remove from len */
if (len <= 2)
return NULL;
len -= 2;
list_for_each_entry(ent, lh, list) {
if (ent->new == profile)
continue;
if (strncmp(ent->new->base.hname, profile->base.hname, len) ==
0 && ent->new->base.hname[len] == 0)
return ent->new;
}
return NULL;
}
/**
* __replace_profile - replace @old with @new on a list
* @old: profile to be replaced (NOT NULL)
* @new: profile to replace @old with (NOT NULL)
*
* Will duplicate and refcount elements that @new inherits from @old
* and will inherit @old children.
*
* refcount @new for list, put @old list refcount
*
* Requires: namespace list lock be held, or list not be shared
*/
static void __replace_profile(struct aa_profile *old, struct aa_profile *new)
{
struct aa_profile *child, *tmp;
if (!list_empty(&old->base.profiles)) {
LIST_HEAD(lh);
list_splice_init_rcu(&old->base.profiles, &lh, synchronize_rcu);
list_for_each_entry_safe(child, tmp, &lh, base.list) {
struct aa_profile *p;
list_del_init(&child->base.list);
p = __find_child(&new->base.profiles, child->base.name);
if (p) {
/* @p replaces @child */
__replace_profile(child, p);
continue;
}
/* inherit @child and its children */
/* TODO: update hname of inherited children */
/* list refcount transferred to @new */
p = aa_deref_parent(child);
rcu_assign_pointer(child->parent, aa_get_profile(new));
list_add_rcu(&child->base.list, &new->base.profiles);
aa_put_profile(p);
}
}
if (!rcu_access_pointer(new->parent)) {
struct aa_profile *parent = aa_deref_parent(old);
rcu_assign_pointer(new->parent, aa_get_profile(parent));
}
aa_label_replace(&old->label, &new->label);
/* migrate dents must come after label replacement b/c update */
__aafs_profile_migrate_dents(old, new);
if (list_empty(&new->base.list)) {
/* new is not on a list already */
list_replace_rcu(&old->base.list, &new->base.list);
aa_get_profile(new);
aa_put_profile(old);
} else
__list_remove_profile(old);
}
/**
* __lookup_replace - lookup replacement information for a profile
* @ns: namespace the lookup occurs in
* @hname: name of profile to lookup
* @noreplace: true if not replacing an existing profile
* @p: Returns - refcounted profile to be replaced, or NULL if none exists
* @info: Returns - info string on why lookup failed
*
* Returns: %0 on success, with @p set, else error code if the profile
*          cannot be replaced
*/
static int __lookup_replace(struct aa_ns *ns, const char *hname,
bool noreplace, struct aa_profile **p,
const char **info)
{
*p = aa_get_profile(__lookup_profile(&ns->base, hname));
if (*p) {
int error = replacement_allowed(*p, noreplace, info);
if (error) {
*info = "profile can not be replaced";
return error;
}
}
return 0;
}
static void share_name(struct aa_profile *old, struct aa_profile *new)
{
aa_put_str(new->base.hname);
aa_get_str(old->base.hname);
new->base.hname = old->base.hname;
new->base.name = old->base.name;
new->label.hname = old->label.hname;
}
/* Update to newest version of parent after previous replacements
* Returns: unrefcount newest version of parent
*/
static struct aa_profile *update_to_newest_parent(struct aa_profile *new)
{
struct aa_profile *parent, *newest;
parent = rcu_dereference_protected(new->parent,
mutex_is_locked(&new->ns->lock));
newest = aa_get_newest_profile(parent);
/* parent replaced in this atomic set? */
if (newest != parent) {
aa_put_profile(parent);
rcu_assign_pointer(new->parent, newest);
} else
aa_put_profile(newest);
return newest;
}
/**
* aa_replace_profiles - replace profile(s) on the profile list
* @policy_ns: namespace load is occurring on
* @label: label that is attempting to load/replace policy
* @mask: permission mask
* @udata: serialized data stream (NOT NULL)
*
* unpack and replace a profile on the profile list and uses of that profile
* by any task creds via invalidating the old version of the profile, which
* tasks will notice to update their own cred. If the profile does not exist
* on the profile list it is added.
*
* Returns: size of data consumed else error code on failure.
*/
ssize_t aa_replace_profiles(struct aa_ns *policy_ns, struct aa_label *label,
u32 mask, struct aa_loaddata *udata)
{
const char *ns_name = NULL, *info = NULL;
struct aa_ns *ns = NULL;
struct aa_load_ent *ent, *tmp;
struct aa_loaddata *rawdata_ent;
const char *op;
ssize_t count, error;
LIST_HEAD(lh);
op = mask & AA_MAY_REPLACE_POLICY ? OP_PROF_REPL : OP_PROF_LOAD;
aa_get_loaddata(udata);
/* released below */
error = aa_unpack(udata, &lh, &ns_name);
if (error)
goto out;
/* ensure that profiles are all for the same ns
* TODO: update locking to remove this constraint. All profiles in
* the load set must succeed as a set or the load will
* fail. Sort ent list and take ns locks in hierarchy order
*/
count = 0;
list_for_each_entry(ent, &lh, list) {
if (ns_name) {
if (ent->ns_name &&
strcmp(ent->ns_name, ns_name) != 0) {
info = "policy load has mixed namespaces";
error = -EACCES;
goto fail;
}
} else if (ent->ns_name) {
if (count) {
info = "policy load has mixed namespaces";
error = -EACCES;
goto fail;
}
ns_name = ent->ns_name;
} else
count++;
}
if (ns_name) {
ns = aa_prepare_ns(policy_ns ? policy_ns : labels_ns(label),
ns_name);
if (IS_ERR(ns)) {
op = OP_PROF_LOAD;
info = "failed to prepare namespace";
error = PTR_ERR(ns);
ns = NULL;
ent = NULL;
goto fail;
}
} else
ns = aa_get_ns(policy_ns ? policy_ns : labels_ns(label));
mutex_lock_nested(&ns->lock, ns->level);
/* check for duplicate rawdata blobs: space and file dedup */
if (!list_empty(&ns->rawdata_list)) {
list_for_each_entry(rawdata_ent, &ns->rawdata_list, list) {
if (aa_rawdata_eq(rawdata_ent, udata)) {
struct aa_loaddata *tmp;
tmp = __aa_get_loaddata(rawdata_ent);
/* check we didn't fail the race */
if (tmp) {
aa_put_loaddata(udata);
udata = tmp;
break;
}
}
}
}
/* setup parent and ns info */
list_for_each_entry(ent, &lh, list) {
struct aa_policy *policy;
struct aa_profile *p;
if (aa_g_export_binary)
ent->new->rawdata = aa_get_loaddata(udata);
error = __lookup_replace(ns, ent->new->base.hname,
!(mask & AA_MAY_REPLACE_POLICY),
&ent->old, &info);
if (error)
goto fail_lock;
if (ent->new->rename) {
error = __lookup_replace(ns, ent->new->rename,
!(mask & AA_MAY_REPLACE_POLICY),
&ent->rename, &info);
if (error)
goto fail_lock;
}
/* released when @new is freed */
ent->new->ns = aa_get_ns(ns);
if (ent->old || ent->rename)
continue;
/* no ref on policy only use inside lock */
p = NULL;
policy = __lookup_parent(ns, ent->new->base.hname);
if (!policy) {
/* first check for parent in the load set */
p = __list_lookup_parent(&lh, ent->new);
if (!p) {
/*
* fill in missing parent with null
* profile that doesn't have
* permissions. This allows for
* individual profile loading where
* the child is loaded before the
* parent, and outside of the current
* atomic set. This unfortunately can
* happen with some userspaces. The
* null profile will be replaced once
* the parent is loaded.
*/
policy = __create_missing_ancestors(ns,
ent->new->base.hname,
GFP_KERNEL);
if (!policy) {
error = -ENOENT;
info = "parent does not exist";
goto fail_lock;
}
}
}
if (!p && policy != &ns->base)
/* released on profile replacement or free_profile */
p = (struct aa_profile *) policy;
rcu_assign_pointer(ent->new->parent, aa_get_profile(p));
}
/* create new fs entries for introspection if needed */
if (!udata->dents[AAFS_LOADDATA_DIR] && aa_g_export_binary) {
error = __aa_fs_create_rawdata(ns, udata);
if (error) {
info = "failed to create raw_data dir and files";
ent = NULL;
goto fail_lock;
}
}
list_for_each_entry(ent, &lh, list) {
if (!ent->old) {
struct dentry *parent;
if (rcu_access_pointer(ent->new->parent)) {
struct aa_profile *p;
p = aa_deref_parent(ent->new);
parent = prof_child_dir(p);
} else
parent = ns_subprofs_dir(ent->new->ns);
error = __aafs_profile_mkdir(ent->new, parent);
}
if (error) {
info = "failed to create";
goto fail_lock;
}
}
/* Done with checks that may fail - do actual replacement */
__aa_bump_ns_revision(ns);
if (aa_g_export_binary)
__aa_loaddata_update(udata, ns->revision);
list_for_each_entry_safe(ent, tmp, &lh, list) {
list_del_init(&ent->list);
op = (!ent->old && !ent->rename) ? OP_PROF_LOAD : OP_PROF_REPL;
if (ent->old && ent->old->rawdata == ent->new->rawdata &&
ent->new->rawdata) {
/* dedup actual profile replacement */
audit_policy(label, op, ns_name, ent->new->base.hname,
"same as current profile, skipping",
error);
/* break refcount cycle with proxy. */
aa_put_proxy(ent->new->label.proxy);
ent->new->label.proxy = NULL;
goto skip;
}
/*
* TODO: finer dedup based on profile range in data. Load set
* can differ but profile may remain unchanged
*/
audit_policy(label, op, ns_name, ent->new->base.hname, NULL,
error);
if (ent->old) {
share_name(ent->old, ent->new);
__replace_profile(ent->old, ent->new);
} else {
struct list_head *lh;
if (rcu_access_pointer(ent->new->parent)) {
struct aa_profile *parent;
parent = update_to_newest_parent(ent->new);
lh = &parent->base.profiles;
} else
lh = &ns->base.profiles;
__add_profile(lh, ent->new);
}
skip:
aa_load_ent_free(ent);
}
__aa_labelset_update_subtree(ns);
mutex_unlock(&ns->lock);
out:
aa_put_ns(ns);
aa_put_loaddata(udata);
kfree(ns_name);
if (error)
return error;
return udata->size;
fail_lock:
mutex_unlock(&ns->lock);
/* audit cause of failure */
op = (ent && !ent->old) ? OP_PROF_LOAD : OP_PROF_REPL;
fail:
audit_policy(label, op, ns_name, ent ? ent->new->base.hname : NULL,
info, error);
/* audit status that rest of profiles in the atomic set failed too */
info = "valid profile in failed atomic policy load";
list_for_each_entry(tmp, &lh, list) {
if (tmp == ent) {
info = "unchecked profile in failed atomic policy load";
/* skip entry that caused failure */
continue;
}
op = (!tmp->old) ? OP_PROF_LOAD : OP_PROF_REPL;
audit_policy(label, op, ns_name, tmp->new->base.hname, info,
error);
}
list_for_each_entry_safe(ent, tmp, &lh, list) {
list_del_init(&ent->list);
aa_load_ent_free(ent);
}
goto out;
}
/**
* aa_remove_profiles - remove profile(s) from the system
* @policy_ns: namespace the remove is being done from
* @subj: label attempting to remove policy
* @fqname: name of the profile or namespace to remove (NOT NULL)
* @size: size of the name
*
* Remove a profile or sub namespace from the current namespace, so that
* they cannot be found anymore, and mark them as replaced by unconfined
*
* NOTE: removing confinement does not restore rlimits to preconfinement values
*
* Returns: size of data consumed else error code if it fails
*/
ssize_t aa_remove_profiles(struct aa_ns *policy_ns, struct aa_label *subj,
char *fqname, size_t size)
{
struct aa_ns *ns = NULL;
struct aa_profile *profile = NULL;
const char *name = fqname, *info = NULL;
const char *ns_name = NULL;
ssize_t error = 0;
if (*fqname == 0) {
info = "no profile specified";
error = -ENOENT;
goto fail;
}
if (fqname[0] == ':') {
size_t ns_len;
name = aa_splitn_fqname(fqname, size, &ns_name, &ns_len);
/* released below */
ns = aa_lookupn_ns(policy_ns ? policy_ns : labels_ns(subj),
ns_name, ns_len);
if (!ns) {
info = "namespace does not exist";
error = -ENOENT;
goto fail;
}
} else
/* released below */
ns = aa_get_ns(policy_ns ? policy_ns : labels_ns(subj));
if (!name) {
/* remove namespace - can only happen if fqname[0] == ':' */
mutex_lock_nested(&ns->parent->lock, ns->parent->level);
__aa_bump_ns_revision(ns);
__aa_remove_ns(ns);
mutex_unlock(&ns->parent->lock);
} else {
/* remove profile */
mutex_lock_nested(&ns->lock, ns->level);
profile = aa_get_profile(__lookup_profile(&ns->base, name));
if (!profile) {
error = -ENOENT;
info = "profile does not exist";
goto fail_ns_lock;
}
name = profile->base.hname;
__aa_bump_ns_revision(ns);
__remove_profile(profile);
__aa_labelset_update_subtree(ns);
mutex_unlock(&ns->lock);
}
/* don't fail removal if audit fails */
(void) audit_policy(subj, OP_PROF_RM, ns_name, name, info,
error);
aa_put_ns(ns);
aa_put_profile(profile);
return size;
fail_ns_lock:
mutex_unlock(&ns->lock);
aa_put_ns(ns);
fail:
(void) audit_policy(subj, OP_PROF_RM, ns_name, name, info,
error);
return error;
}
| linux-master | security/apparmor/policy.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* AppArmor security module
*
* This file contains AppArmor mediation of files
*
* Copyright (C) 1998-2008 Novell/SUSE
* Copyright 2009-2010 Canonical Ltd.
*/
#include <linux/tty.h>
#include <linux/fdtable.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/mount.h>
#include "include/apparmor.h"
#include "include/audit.h"
#include "include/cred.h"
#include "include/file.h"
#include "include/match.h"
#include "include/net.h"
#include "include/path.h"
#include "include/policy.h"
#include "include/label.h"
static u32 map_mask_to_chr_mask(u32 mask)
{
u32 m = mask & PERMS_CHRS_MASK;
if (mask & AA_MAY_GETATTR)
m |= MAY_READ;
if (mask & (AA_MAY_SETATTR | AA_MAY_CHMOD | AA_MAY_CHOWN))
m |= MAY_WRITE;
return m;
}
/**
* file_audit_cb - call back for file specific audit fields
* @ab: audit_buffer (NOT NULL)
* @va: audit struct to audit values of (NOT NULL)
*/
static void file_audit_cb(struct audit_buffer *ab, void *va)
{
struct common_audit_data *sa = va;
kuid_t fsuid = current_fsuid();
char str[10];
if (aad(sa)->request & AA_AUDIT_FILE_MASK) {
aa_perm_mask_to_str(str, sizeof(str), aa_file_perm_chrs,
map_mask_to_chr_mask(aad(sa)->request));
audit_log_format(ab, " requested_mask=\"%s\"", str);
}
if (aad(sa)->denied & AA_AUDIT_FILE_MASK) {
aa_perm_mask_to_str(str, sizeof(str), aa_file_perm_chrs,
map_mask_to_chr_mask(aad(sa)->denied));
audit_log_format(ab, " denied_mask=\"%s\"", str);
}
if (aad(sa)->request & AA_AUDIT_FILE_MASK) {
audit_log_format(ab, " fsuid=%d",
from_kuid(&init_user_ns, fsuid));
audit_log_format(ab, " ouid=%d",
from_kuid(&init_user_ns, aad(sa)->fs.ouid));
}
if (aad(sa)->peer) {
audit_log_format(ab, " target=");
aa_label_xaudit(ab, labels_ns(aad(sa)->label), aad(sa)->peer,
FLAG_VIEW_SUBNS, GFP_KERNEL);
} else if (aad(sa)->fs.target) {
audit_log_format(ab, " target=");
audit_log_untrustedstring(ab, aad(sa)->fs.target);
}
}
/**
* aa_audit_file - handle the auditing of file operations
* @profile: the profile being enforced (NOT NULL)
* @perms: the permissions computed for the request (NOT NULL)
* @op: operation being mediated
* @request: permissions requested
* @name: name of object being mediated (MAYBE NULL)
* @target: name of target (MAYBE NULL)
* @tlabel: target label (MAY BE NULL)
* @ouid: object uid
* @info: extra information message (MAYBE NULL)
* @error: 0 if operation allowed else failure error code
*
* Returns: %0 or error on failure
*/
int aa_audit_file(struct aa_profile *profile, struct aa_perms *perms,
const char *op, u32 request, const char *name,
const char *target, struct aa_label *tlabel,
kuid_t ouid, const char *info, int error)
{
int type = AUDIT_APPARMOR_AUTO;
DEFINE_AUDIT_DATA(sa, LSM_AUDIT_DATA_TASK, AA_CLASS_FILE, op);
sa.u.tsk = NULL;
aad(&sa)->request = request;
aad(&sa)->name = name;
aad(&sa)->fs.target = target;
aad(&sa)->peer = tlabel;
aad(&sa)->fs.ouid = ouid;
aad(&sa)->info = info;
aad(&sa)->error = error;
sa.u.tsk = NULL;
if (likely(!aad(&sa)->error)) {
u32 mask = perms->audit;
if (unlikely(AUDIT_MODE(profile) == AUDIT_ALL))
mask = 0xffff;
/* mask off perms that are not being force audited */
aad(&sa)->request &= mask;
if (likely(!aad(&sa)->request))
return 0;
type = AUDIT_APPARMOR_AUDIT;
} else {
/* only report permissions that were denied */
aad(&sa)->request = aad(&sa)->request & ~perms->allow;
AA_BUG(!aad(&sa)->request);
if (aad(&sa)->request & perms->kill)
type = AUDIT_APPARMOR_KILL;
/* quiet known rejects, assumes quiet and kill do not overlap */
if ((aad(&sa)->request & perms->quiet) &&
AUDIT_MODE(profile) != AUDIT_NOQUIET &&
AUDIT_MODE(profile) != AUDIT_ALL)
aad(&sa)->request &= ~perms->quiet;
if (!aad(&sa)->request)
return aad(&sa)->error;
}
aad(&sa)->denied = aad(&sa)->request & ~perms->allow;
return aa_audit(type, profile, &sa, file_audit_cb);
}
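/*
 * Illustrative sketch (not part of the upstream file): a minimal example
 * of how a mediation path might report a denied read with aa_audit_file().
 * The operation string "example_op" and the helper name are hypothetical;
 * real callers pass an OP_* constant and the perms they computed.
 */
static inline int example_audit_denied_read(struct aa_profile *profile,
					    const char *name, kuid_t ouid)
{
	/* &nullperms allows nothing, so MAY_READ is audited as denied */
	return aa_audit_file(profile, &nullperms, "example_op", MAY_READ,
			     name, NULL, NULL, ouid, NULL, -EACCES);
}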
static int path_name(const char *op, struct aa_label *label,
const struct path *path, int flags, char *buffer,
const char **name, struct path_cond *cond, u32 request)
{
struct aa_profile *profile;
const char *info = NULL;
int error;
error = aa_path_name(path, flags, buffer, name, &info,
labels_profile(label)->disconnected);
if (error) {
fn_for_each_confined(label, profile,
aa_audit_file(profile, &nullperms, op, request, *name,
NULL, NULL, cond->uid, info, error));
return error;
}
return 0;
}
struct aa_perms default_perms = {};
/**
* aa_lookup_fperms - convert dfa compressed perms to internal perms
 * @file_rules: file rules (policydb) to lookup perms in (NOT NULL)
* @state: state in dfa
* @cond: conditions to consider (NOT NULL)
*
* TODO: convert from dfa + state to permission entry
*
* Returns: a pointer to a file permission set
*/
struct aa_perms *aa_lookup_fperms(struct aa_policydb *file_rules,
aa_state_t state, struct path_cond *cond)
{
unsigned int index = ACCEPT_TABLE(file_rules->dfa)[state];
if (!(file_rules->perms))
return &default_perms;
if (uid_eq(current_fsuid(), cond->uid))
return &(file_rules->perms[index]);
return &(file_rules->perms[index + 1]);
}
/**
 * aa_str_perms - find permissions that match @name
 * @file_rules: file permission rules to match against (NOT NULL)
 * @start: state to start matching in
* @name: string to match against dfa (NOT NULL)
* @cond: conditions to consider for permission set computation (NOT NULL)
* @perms: Returns - the permissions found when matching @name
*
 * Returns: the final state in the dfa when beginning at @start and walking @name
*/
aa_state_t aa_str_perms(struct aa_policydb *file_rules, aa_state_t start,
const char *name, struct path_cond *cond,
struct aa_perms *perms)
{
aa_state_t state;
state = aa_dfa_match(file_rules->dfa, start, name);
*perms = *(aa_lookup_fperms(file_rules, state, cond));
return state;
}
static int __aa_path_perm(const char *op, struct aa_profile *profile,
const char *name, u32 request,
struct path_cond *cond, int flags,
struct aa_perms *perms)
{
struct aa_ruleset *rules = list_first_entry(&profile->rules,
typeof(*rules), list);
int e = 0;
if (profile_unconfined(profile))
return 0;
aa_str_perms(&(rules->file), rules->file.start[AA_CLASS_FILE],
name, cond, perms);
if (request & ~perms->allow)
e = -EACCES;
return aa_audit_file(profile, perms, op, request, name, NULL, NULL,
cond->uid, NULL, e);
}
static int profile_path_perm(const char *op, struct aa_profile *profile,
const struct path *path, char *buffer, u32 request,
struct path_cond *cond, int flags,
struct aa_perms *perms)
{
const char *name;
int error;
if (profile_unconfined(profile))
return 0;
error = path_name(op, &profile->label, path,
flags | profile->path_flags, buffer, &name, cond,
request);
if (error)
return error;
return __aa_path_perm(op, profile, name, request, cond, flags,
perms);
}
/**
* aa_path_perm - do permissions check & audit for @path
* @op: operation being checked
 * @label: label being enforced (NOT NULL)
* @path: path to check permissions of (NOT NULL)
* @flags: any additional path flags beyond what the profile specifies
* @request: requested permissions
* @cond: conditional info for this request (NOT NULL)
*
* Returns: %0 else error if access denied or other error
*/
int aa_path_perm(const char *op, struct aa_label *label,
const struct path *path, int flags, u32 request,
struct path_cond *cond)
{
struct aa_perms perms = {};
struct aa_profile *profile;
char *buffer = NULL;
int error;
flags |= PATH_DELEGATE_DELETED | (S_ISDIR(cond->mode) ? PATH_IS_DIR :
0);
buffer = aa_get_buffer(false);
if (!buffer)
return -ENOMEM;
error = fn_for_each_confined(label, profile,
profile_path_perm(op, profile, path, buffer, request,
cond, flags, &perms));
aa_put_buffer(buffer);
return error;
}
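/*
 * Illustrative sketch (not part of the upstream file): how a caller that
 * already holds a label and a path might request a read check through
 * aa_path_perm(). The operation string "example_open" is a stand-in; real
 * hooks pass one of the OP_* constants from audit.h.
 */
static inline int example_path_read_perm(struct aa_label *label,
					 const struct path *path)
{
	struct inode *inode = d_backing_inode(path->dentry);
	struct path_cond cond = {
		.uid = inode->i_uid,
		.mode = inode->i_mode,
	};

	return aa_path_perm("example_open", label, path, 0, MAY_READ, &cond);
}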
/**
* xindex_is_subset - helper for aa_path_link
* @link: link permission set
* @target: target permission set
*
* test target x permissions are equal OR a subset of link x permissions
* this is done as part of the subset test, where a hardlink must have
* a subset of permissions that the target has.
*
* Returns: true if subset else false
*/
static inline bool xindex_is_subset(u32 link, u32 target)
{
if (((link & ~AA_X_UNSAFE) != (target & ~AA_X_UNSAFE)) ||
((link & AA_X_UNSAFE) && !(target & AA_X_UNSAFE)))
return false;
return true;
}
static int profile_path_link(struct aa_profile *profile,
const struct path *link, char *buffer,
const struct path *target, char *buffer2,
struct path_cond *cond)
{
struct aa_ruleset *rules = list_first_entry(&profile->rules,
typeof(*rules), list);
const char *lname, *tname = NULL;
struct aa_perms lperms = {}, perms;
const char *info = NULL;
u32 request = AA_MAY_LINK;
aa_state_t state;
int error;
error = path_name(OP_LINK, &profile->label, link, profile->path_flags,
buffer, &lname, cond, AA_MAY_LINK);
if (error)
goto audit;
/* buffer2 freed below, tname is pointer in buffer2 */
error = path_name(OP_LINK, &profile->label, target, profile->path_flags,
buffer2, &tname, cond, AA_MAY_LINK);
if (error)
goto audit;
error = -EACCES;
/* aa_str_perms - handles the case of the dfa being NULL */
state = aa_str_perms(&(rules->file),
rules->file.start[AA_CLASS_FILE], lname,
cond, &lperms);
if (!(lperms.allow & AA_MAY_LINK))
goto audit;
/* test to see if target can be paired with link */
state = aa_dfa_null_transition(rules->file.dfa, state);
aa_str_perms(&(rules->file), state, tname, cond, &perms);
/* force audit/quiet masks for link are stored in the second entry
* in the link pair.
*/
lperms.audit = perms.audit;
lperms.quiet = perms.quiet;
lperms.kill = perms.kill;
if (!(perms.allow & AA_MAY_LINK)) {
info = "target restricted";
lperms = perms;
goto audit;
}
/* done if link subset test is not required */
if (!(perms.allow & AA_LINK_SUBSET))
goto done_tests;
/* Do link perm subset test requiring allowed permission on link are
* a subset of the allowed permissions on target.
*/
aa_str_perms(&(rules->file), rules->file.start[AA_CLASS_FILE],
tname, cond, &perms);
/* AA_MAY_LINK is not considered in the subset test */
request = lperms.allow & ~AA_MAY_LINK;
lperms.allow &= perms.allow | AA_MAY_LINK;
request |= AA_AUDIT_FILE_MASK & (lperms.allow & ~perms.allow);
if (request & ~lperms.allow) {
goto audit;
} else if ((lperms.allow & MAY_EXEC) &&
!xindex_is_subset(lperms.xindex, perms.xindex)) {
lperms.allow &= ~MAY_EXEC;
request |= MAY_EXEC;
info = "link not subset of target";
goto audit;
}
done_tests:
error = 0;
audit:
return aa_audit_file(profile, &lperms, OP_LINK, request, lname, tname,
NULL, cond->uid, info, error);
}
/**
* aa_path_link - Handle hard link permission check
* @label: the label being enforced (NOT NULL)
* @old_dentry: the target dentry (NOT NULL)
* @new_dir: directory the new link will be created in (NOT NULL)
* @new_dentry: the link being created (NOT NULL)
*
* Handle the permission test for a link & target pair. Permission
* is encoded as a pair where the link permission is determined
* first, and if allowed, the target is tested. The target test
* is done from the point of the link match (not start of DFA)
* making the target permission dependent on the link permission match.
*
* The subset test if required forces that permissions granted
* on link are a subset of the permission granted to target.
*
* Returns: %0 if allowed else error
*/
int aa_path_link(struct aa_label *label, struct dentry *old_dentry,
const struct path *new_dir, struct dentry *new_dentry)
{
struct path link = { .mnt = new_dir->mnt, .dentry = new_dentry };
struct path target = { .mnt = new_dir->mnt, .dentry = old_dentry };
struct path_cond cond = {
d_backing_inode(old_dentry)->i_uid,
d_backing_inode(old_dentry)->i_mode
};
char *buffer = NULL, *buffer2 = NULL;
struct aa_profile *profile;
int error;
/* buffer freed below, lname is pointer in buffer */
buffer = aa_get_buffer(false);
buffer2 = aa_get_buffer(false);
error = -ENOMEM;
if (!buffer || !buffer2)
goto out;
error = fn_for_each_confined(label, profile,
profile_path_link(profile, &link, buffer, &target,
buffer2, &cond));
out:
aa_put_buffer(buffer);
aa_put_buffer(buffer2);
return error;
}
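/*
 * Illustrative sketch (not part of the upstream file): the shape of a
 * hard link check as a security hook would issue it, taking the label
 * from a cred. The helper name is hypothetical.
 */
static inline int example_link_perm(const struct cred *cred,
				    struct dentry *old_dentry,
				    const struct path *new_dir,
				    struct dentry *new_dentry)
{
	struct aa_label *label = aa_get_newest_cred_label(cred);
	int error = aa_path_link(label, old_dentry, new_dir, new_dentry);

	aa_put_label(label);
	return error;
}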
static void update_file_ctx(struct aa_file_ctx *fctx, struct aa_label *label,
u32 request)
{
struct aa_label *l, *old;
/* update caching of label on file_ctx */
spin_lock(&fctx->lock);
old = rcu_dereference_protected(fctx->label,
lockdep_is_held(&fctx->lock));
l = aa_label_merge(old, label, GFP_ATOMIC);
if (l) {
if (l != old) {
rcu_assign_pointer(fctx->label, l);
aa_put_label(old);
} else
aa_put_label(l);
fctx->allow |= request;
}
spin_unlock(&fctx->lock);
}
static int __file_path_perm(const char *op, struct aa_label *label,
struct aa_label *flabel, struct file *file,
u32 request, u32 denied, bool in_atomic)
{
struct aa_profile *profile;
struct aa_perms perms = {};
vfsuid_t vfsuid = i_uid_into_vfsuid(file_mnt_idmap(file),
file_inode(file));
struct path_cond cond = {
.uid = vfsuid_into_kuid(vfsuid),
.mode = file_inode(file)->i_mode
};
char *buffer;
int flags, error;
/* revalidation due to label out of date. No revocation at this time */
if (!denied && aa_label_is_subset(flabel, label))
/* TODO: check for revocation on stale profiles */
return 0;
flags = PATH_DELEGATE_DELETED | (S_ISDIR(cond.mode) ? PATH_IS_DIR : 0);
buffer = aa_get_buffer(in_atomic);
if (!buffer)
return -ENOMEM;
/* check every profile in task label not in current cache */
error = fn_for_each_not_in_set(flabel, label, profile,
profile_path_perm(op, profile, &file->f_path, buffer,
request, &cond, flags, &perms));
if (denied && !error) {
/*
* check every profile in file label that was not tested
* in the initial check above.
*
* TODO: cache full perms so this only happens because of
* conditionals
* TODO: don't audit here
*/
if (label == flabel)
error = fn_for_each(label, profile,
profile_path_perm(op, profile, &file->f_path,
buffer, request, &cond, flags,
&perms));
else
error = fn_for_each_not_in_set(label, flabel, profile,
profile_path_perm(op, profile, &file->f_path,
buffer, request, &cond, flags,
&perms));
}
if (!error)
update_file_ctx(file_ctx(file), label, request);
aa_put_buffer(buffer);
return error;
}
static int __file_sock_perm(const char *op, struct aa_label *label,
struct aa_label *flabel, struct file *file,
u32 request, u32 denied)
{
struct socket *sock = (struct socket *) file->private_data;
int error;
AA_BUG(!sock);
/* revalidation due to label out of date. No revocation at this time */
if (!denied && aa_label_is_subset(flabel, label))
return 0;
/* TODO: improve to skip profiles cached in flabel */
error = aa_sock_file_perm(label, op, request, sock);
if (denied) {
/* TODO: improve to skip profiles checked above */
/* check every profile in file label to is cached */
last_error(error, aa_sock_file_perm(flabel, op, request, sock));
}
if (!error)
update_file_ctx(file_ctx(file), label, request);
return error;
}
/**
* aa_file_perm - do permission revalidation check & audit for @file
* @op: operation being checked
* @label: label being enforced (NOT NULL)
* @file: file to revalidate access permissions on (NOT NULL)
* @request: requested permissions
* @in_atomic: whether allocations need to be done in atomic context
*
* Returns: %0 if access allowed else error
*/
int aa_file_perm(const char *op, struct aa_label *label, struct file *file,
u32 request, bool in_atomic)
{
struct aa_file_ctx *fctx;
struct aa_label *flabel;
u32 denied;
int error = 0;
AA_BUG(!label);
AA_BUG(!file);
fctx = file_ctx(file);
rcu_read_lock();
flabel = rcu_dereference(fctx->label);
AA_BUG(!flabel);
/* revalidate access, if task is unconfined, or the cached cred
* doesn't match or if the request is for more permissions than
* was granted.
*
* Note: the test for !unconfined(flabel) is to handle file
* delegation from unconfined tasks
*/
denied = request & ~fctx->allow;
if (unconfined(label) || unconfined(flabel) ||
(!denied && aa_label_is_subset(flabel, label))) {
rcu_read_unlock();
goto done;
}
flabel = aa_get_newest_label(flabel);
rcu_read_unlock();
/* TODO: label cross check */
if (file->f_path.mnt && path_mediated_fs(file->f_path.dentry))
error = __file_path_perm(op, label, flabel, file, request,
denied, in_atomic);
else if (S_ISSOCK(file_inode(file)->i_mode))
error = __file_sock_perm(op, label, flabel, file, request,
denied);
aa_put_label(flabel);
done:
return error;
}
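/*
 * Illustrative sketch (not part of the upstream file): revalidating
 * read/write access on an already open file against the newest label of
 * a cred, in a context where sleeping allocations are fine. The helper
 * name and the "example_op" operation string are hypothetical.
 */
static inline int example_file_revalidate(const struct cred *cred,
					  struct file *file)
{
	struct aa_label *label = aa_get_newest_cred_label(cred);
	int error = aa_file_perm("example_op", label, file,
				 MAY_READ | MAY_WRITE, false);

	aa_put_label(label);
	return error;
}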
static void revalidate_tty(struct aa_label *label)
{
struct tty_struct *tty;
int drop_tty = 0;
tty = get_current_tty();
if (!tty)
return;
spin_lock(&tty->files_lock);
if (!list_empty(&tty->tty_files)) {
struct tty_file_private *file_priv;
struct file *file;
/* TODO: Revalidate access to controlling tty. */
file_priv = list_first_entry(&tty->tty_files,
struct tty_file_private, list);
file = file_priv->file;
if (aa_file_perm(OP_INHERIT, label, file, MAY_READ | MAY_WRITE,
IN_ATOMIC))
drop_tty = 1;
}
spin_unlock(&tty->files_lock);
tty_kref_put(tty);
if (drop_tty)
no_tty();
}
static int match_file(const void *p, struct file *file, unsigned int fd)
{
struct aa_label *label = (struct aa_label *)p;
if (aa_file_perm(OP_INHERIT, label, file, aa_map_file_to_perms(file),
IN_ATOMIC))
return fd + 1;
return 0;
}
/* based on selinux's flush_unauthorized_files */
void aa_inherit_files(const struct cred *cred, struct files_struct *files)
{
struct aa_label *label = aa_get_newest_cred_label(cred);
struct file *devnull = NULL;
unsigned int n;
revalidate_tty(label);
/* Revalidate access to inherited open files. */
n = iterate_fd(files, 0, match_file, label);
if (!n) /* none found? */
goto out;
devnull = dentry_open(&aa_null, O_RDWR, cred);
if (IS_ERR(devnull))
devnull = NULL;
/* replace all the matching ones with this */
do {
replace_fd(n - 1, devnull, 0);
} while ((n = iterate_fd(files, n, match_file, label)) != 0);
if (devnull)
fput(devnull);
out:
aa_put_label(label);
}
| linux-master | security/apparmor/file.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* AppArmor security module
*
* This file contains AppArmor ipc mediation
*
* Copyright (C) 1998-2008 Novell/SUSE
* Copyright 2009-2017 Canonical Ltd.
*/
#include <linux/gfp.h>
#include "include/audit.h"
#include "include/capability.h"
#include "include/cred.h"
#include "include/policy.h"
#include "include/ipc.h"
#include "include/sig_names.h"
static inline int map_signal_num(int sig)
{
if (sig > SIGRTMAX)
return SIGUNKNOWN;
else if (sig >= SIGRTMIN)
return sig - SIGRTMIN + SIGRT_BASE;
else if (sig < MAXMAPPED_SIG)
return sig_map[sig];
return SIGUNKNOWN;
}
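/*
 * Illustrative note (not part of the upstream file), assuming the usual
 * in-kernel numbering where SIGRTMIN is 32: a classic signal such as
 * SIGKILL is translated through sig_map[], SIGRTMIN + 5 becomes
 * SIGRT_BASE + 5, and anything above SIGRTMAX maps to SIGUNKNOWN.
 */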
/**
* audit_signal_mask - convert mask to permission string
* @mask: permission mask to convert
*
* Returns: pointer to static string
*/
static const char *audit_signal_mask(u32 mask)
{
if (mask & MAY_READ)
return "receive";
if (mask & MAY_WRITE)
return "send";
return "";
}
/**
* audit_signal_cb() - call back for signal specific audit fields
* @ab: audit_buffer (NOT NULL)
* @va: audit struct to audit values of (NOT NULL)
*/
static void audit_signal_cb(struct audit_buffer *ab, void *va)
{
struct common_audit_data *sa = va;
if (aad(sa)->request & AA_SIGNAL_PERM_MASK) {
audit_log_format(ab, " requested_mask=\"%s\"",
audit_signal_mask(aad(sa)->request));
if (aad(sa)->denied & AA_SIGNAL_PERM_MASK) {
audit_log_format(ab, " denied_mask=\"%s\"",
audit_signal_mask(aad(sa)->denied));
}
}
if (aad(sa)->signal == SIGUNKNOWN)
audit_log_format(ab, "signal=unknown(%d)",
aad(sa)->unmappedsig);
else if (aad(sa)->signal < MAXMAPPED_SIGNAME)
audit_log_format(ab, " signal=%s", sig_names[aad(sa)->signal]);
else
audit_log_format(ab, " signal=rtmin+%d",
aad(sa)->signal - SIGRT_BASE);
audit_log_format(ab, " peer=");
aa_label_xaudit(ab, labels_ns(aad(sa)->label), aad(sa)->peer,
FLAGS_NONE, GFP_ATOMIC);
}
static int profile_signal_perm(struct aa_profile *profile,
struct aa_label *peer, u32 request,
struct common_audit_data *sa)
{
struct aa_ruleset *rules = list_first_entry(&profile->rules,
typeof(*rules), list);
struct aa_perms perms;
aa_state_t state;
if (profile_unconfined(profile) ||
!ANY_RULE_MEDIATES(&profile->rules, AA_CLASS_SIGNAL))
return 0;
aad(sa)->peer = peer;
/* TODO: secondary cache check <profile, profile, perm> */
state = aa_dfa_next(rules->policy.dfa,
rules->policy.start[AA_CLASS_SIGNAL],
aad(sa)->signal);
aa_label_match(profile, rules, peer, state, false, request, &perms);
aa_apply_modes_to_perms(profile, &perms);
return aa_check_perms(profile, &perms, request, sa, audit_signal_cb);
}
int aa_may_signal(struct aa_label *sender, struct aa_label *target, int sig)
{
struct aa_profile *profile;
DEFINE_AUDIT_DATA(sa, LSM_AUDIT_DATA_NONE, AA_CLASS_SIGNAL, OP_SIGNAL);
aad(&sa)->signal = map_signal_num(sig);
aad(&sa)->unmappedsig = sig;
return xcheck_labels(sender, target, profile,
profile_signal_perm(profile, target, MAY_WRITE, &sa),
profile_signal_perm(profile, sender, MAY_READ, &sa));
}
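/*
 * Illustrative sketch (not part of the upstream file): checking whether
 * one label may deliver SIGTERM to another. Per the xcheck above, the
 * sender is tested for send (MAY_WRITE) and the target for receive
 * (MAY_READ). The helper name is hypothetical.
 */
static inline int example_may_send_sigterm(struct aa_label *sender,
					   struct aa_label *target)
{
	return aa_may_signal(sender, target, SIGTERM);
}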
| linux-master | security/apparmor/ipc.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* AppArmor security module
*
* This file contains AppArmor policy loading interface function definitions.
*
* Copyright 2013 Canonical Ltd.
*
* Fns to provide a checksum of policy that has been loaded this can be
* compared to userspace policy compiles to check loaded policy is what
* it should be.
*/
#include <crypto/hash.h>
#include "include/apparmor.h"
#include "include/crypto.h"
static unsigned int apparmor_hash_size;
static struct crypto_shash *apparmor_tfm;
unsigned int aa_hash_size(void)
{
return apparmor_hash_size;
}
char *aa_calc_hash(void *data, size_t len)
{
SHASH_DESC_ON_STACK(desc, apparmor_tfm);
char *hash;
int error;
if (!apparmor_tfm)
return NULL;
hash = kzalloc(apparmor_hash_size, GFP_KERNEL);
if (!hash)
return ERR_PTR(-ENOMEM);
desc->tfm = apparmor_tfm;
error = crypto_shash_init(desc);
if (error)
goto fail;
error = crypto_shash_update(desc, (u8 *) data, len);
if (error)
goto fail;
error = crypto_shash_final(desc, hash);
if (error)
goto fail;
return hash;
fail:
kfree(hash);
return ERR_PTR(error);
}
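/*
 * Illustrative sketch (not part of the upstream file): comparing a blob
 * against an expected digest with aa_calc_hash(). Note the function can
 * return NULL (hashing not set up) as well as an ERR_PTR, so both cases
 * are treated as "no match" here. The helper name is hypothetical.
 */
static inline bool example_hash_matches(void *data, size_t len,
					const u8 *expected)
{
	char *hash = aa_calc_hash(data, len);
	bool match;

	if (IS_ERR_OR_NULL(hash))
		return false;
	match = memcmp(hash, expected, aa_hash_size()) == 0;
	kfree(hash);
	return match;
}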
int aa_calc_profile_hash(struct aa_profile *profile, u32 version, void *start,
size_t len)
{
SHASH_DESC_ON_STACK(desc, apparmor_tfm);
int error;
__le32 le32_version = cpu_to_le32(version);
if (!aa_g_hash_policy)
return 0;
if (!apparmor_tfm)
return 0;
profile->hash = kzalloc(apparmor_hash_size, GFP_KERNEL);
if (!profile->hash)
return -ENOMEM;
desc->tfm = apparmor_tfm;
error = crypto_shash_init(desc);
if (error)
goto fail;
error = crypto_shash_update(desc, (u8 *) &le32_version, 4);
if (error)
goto fail;
error = crypto_shash_update(desc, (u8 *) start, len);
if (error)
goto fail;
error = crypto_shash_final(desc, profile->hash);
if (error)
goto fail;
return 0;
fail:
kfree(profile->hash);
profile->hash = NULL;
return error;
}
static int __init init_profile_hash(void)
{
struct crypto_shash *tfm;
if (!apparmor_initialized)
return 0;
tfm = crypto_alloc_shash("sha1", 0, 0);
if (IS_ERR(tfm)) {
int error = PTR_ERR(tfm);
AA_ERROR("failed to setup profile sha1 hashing: %d\n", error);
return error;
}
apparmor_tfm = tfm;
apparmor_hash_size = crypto_shash_digestsize(apparmor_tfm);
aa_info_message("AppArmor sha1 policy hashing enabled");
return 0;
}
late_initcall(init_profile_hash);
| linux-master | security/apparmor/crypto.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* AppArmor security module
*
* This file contains AppArmor capability mediation functions
*
* Copyright (C) 1998-2008 Novell/SUSE
* Copyright 2009-2010 Canonical Ltd.
*/
#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/security.h>
#include "include/apparmor.h"
#include "include/capability.h"
#include "include/cred.h"
#include "include/policy.h"
#include "include/audit.h"
/*
* Table of capability names: we generate it from capabilities.h.
*/
#include "capability_names.h"
struct aa_sfs_entry aa_sfs_entry_caps[] = {
AA_SFS_FILE_STRING("mask", AA_SFS_CAPS_MASK),
{ }
};
struct audit_cache {
struct aa_profile *profile;
kernel_cap_t caps;
};
static DEFINE_PER_CPU(struct audit_cache, audit_cache);
/**
* audit_cb - call back for capability components of audit struct
* @ab - audit buffer (NOT NULL)
* @va - audit struct to audit data from (NOT NULL)
*/
static void audit_cb(struct audit_buffer *ab, void *va)
{
struct common_audit_data *sa = va;
audit_log_format(ab, " capname=");
audit_log_untrustedstring(ab, capability_names[sa->u.cap]);
}
/**
* audit_caps - audit a capability
* @sa: audit data
* @profile: profile being tested for confinement (NOT NULL)
* @cap: capability tested
* @error: error code returned by test
*
* Do auditing of capability and handle, audit/complain/kill modes switching
* and duplicate message elimination.
*
* Returns: 0 or sa->error on success, error code on failure
*/
static int audit_caps(struct common_audit_data *sa, struct aa_profile *profile,
int cap, int error)
{
struct aa_ruleset *rules = list_first_entry(&profile->rules,
typeof(*rules), list);
struct audit_cache *ent;
int type = AUDIT_APPARMOR_AUTO;
aad(sa)->error = error;
if (likely(!error)) {
/* test if auditing is being forced */
if (likely((AUDIT_MODE(profile) != AUDIT_ALL) &&
!cap_raised(rules->caps.audit, cap)))
return 0;
type = AUDIT_APPARMOR_AUDIT;
} else if (KILL_MODE(profile) ||
cap_raised(rules->caps.kill, cap)) {
type = AUDIT_APPARMOR_KILL;
} else if (cap_raised(rules->caps.quiet, cap) &&
AUDIT_MODE(profile) != AUDIT_NOQUIET &&
AUDIT_MODE(profile) != AUDIT_ALL) {
/* quiet auditing */
return error;
}
/* Do simple duplicate message elimination */
ent = &get_cpu_var(audit_cache);
if (profile == ent->profile && cap_raised(ent->caps, cap)) {
put_cpu_var(audit_cache);
if (COMPLAIN_MODE(profile))
return complain_error(error);
return error;
} else {
aa_put_profile(ent->profile);
ent->profile = aa_get_profile(profile);
cap_raise(ent->caps, cap);
}
put_cpu_var(audit_cache);
return aa_audit(type, profile, sa, audit_cb);
}
/**
* profile_capable - test if profile allows use of capability @cap
* @profile: profile being enforced (NOT NULL, NOT unconfined)
* @cap: capability to test if allowed
* @opts: CAP_OPT_NOAUDIT bit determines whether audit record is generated
* @sa: audit data (MAY BE NULL indicating no auditing)
*
* Returns: 0 if allowed else -EPERM
*/
static int profile_capable(struct aa_profile *profile, int cap,
unsigned int opts, struct common_audit_data *sa)
{
struct aa_ruleset *rules = list_first_entry(&profile->rules,
typeof(*rules), list);
int error;
if (cap_raised(rules->caps.allow, cap) &&
!cap_raised(rules->caps.denied, cap))
error = 0;
else
error = -EPERM;
if (opts & CAP_OPT_NOAUDIT) {
if (!COMPLAIN_MODE(profile))
return error;
/* audit the cap request in complain mode but note that it
* should be optional.
*/
aad(sa)->info = "optional: no audit";
}
return audit_caps(sa, profile, cap, error);
}
/**
* aa_capable - test permission to use capability
* @label: label being tested for capability (NOT NULL)
* @cap: capability to be tested
* @opts: CAP_OPT_NOAUDIT bit determines whether audit record is generated
*
* Look up capability in profile capability set.
*
* Returns: 0 on success, or else an error code.
*/
int aa_capable(struct aa_label *label, int cap, unsigned int opts)
{
struct aa_profile *profile;
int error = 0;
DEFINE_AUDIT_DATA(sa, LSM_AUDIT_DATA_CAP, AA_CLASS_CAP, OP_CAPABLE);
sa.u.cap = cap;
error = fn_for_each_confined(label, profile,
profile_capable(profile, cap, opts, &sa));
return error;
}
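/*
 * Illustrative sketch (not part of the upstream file): probing whether a
 * label is allowed CAP_NET_ADMIN without generating an audit record on
 * denial, by passing CAP_OPT_NOAUDIT. The helper name is hypothetical.
 */
static inline bool example_has_net_admin(struct aa_label *label)
{
	return aa_capable(label, CAP_NET_ADMIN, CAP_OPT_NOAUDIT) == 0;
}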
| linux-master | security/apparmor/capability.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* AppArmor security module
*
* This file contains AppArmor /proc/<pid>/attr/ interface functions
*
* Copyright (C) 1998-2008 Novell/SUSE
* Copyright 2009-2010 Canonical Ltd.
*/
#include "include/apparmor.h"
#include "include/cred.h"
#include "include/policy.h"
#include "include/policy_ns.h"
#include "include/domain.h"
#include "include/procattr.h"
/**
* aa_getprocattr - Return the label information for @label
* @label: the label to print label info about (NOT NULL)
* @string: Returns - string containing the label info (NOT NULL)
*
* Requires: label != NULL && string != NULL
*
* Creates a string containing the label information for @label.
*
* Returns: size of string placed in @string else error code on failure
*/
int aa_getprocattr(struct aa_label *label, char **string)
{
struct aa_ns *ns = labels_ns(label);
struct aa_ns *current_ns = aa_get_current_ns();
int len;
if (!aa_ns_visible(current_ns, ns, true)) {
aa_put_ns(current_ns);
return -EACCES;
}
len = aa_label_snxprint(NULL, 0, current_ns, label,
FLAG_SHOW_MODE | FLAG_VIEW_SUBNS |
FLAG_HIDDEN_UNCONFINED);
AA_BUG(len < 0);
*string = kmalloc(len + 2, GFP_KERNEL);
if (!*string) {
aa_put_ns(current_ns);
return -ENOMEM;
}
len = aa_label_snxprint(*string, len + 2, current_ns, label,
FLAG_SHOW_MODE | FLAG_VIEW_SUBNS |
FLAG_HIDDEN_UNCONFINED);
if (len < 0) {
aa_put_ns(current_ns);
return len;
}
(*string)[len] = '\n';
(*string)[len + 1] = 0;
aa_put_ns(current_ns);
return len + 1;
}
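/*
 * Illustrative sketch (not part of the upstream file): fetching the
 * printable confinement string for a label. On success the returned
 * buffer is newline terminated and must be freed by the caller. The
 * helper name is hypothetical.
 */
static inline void example_log_confinement(struct aa_label *label)
{
	char *str = NULL;
	int len = aa_getprocattr(label, &str);

	if (len < 0)
		return;
	pr_info("confinement: %s", str);
	kfree(str);
}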
/**
* split_token_from_name - separate a string of form <token>^<name>
* @op: operation being checked
* @args: string to parse (NOT NULL)
* @token: stores returned parsed token value (NOT NULL)
*
 * Returns: start position of name after the token, NULL if no name follows,
 * or ERR_PTR(-EINVAL) on a parse failure
*/
static char *split_token_from_name(const char *op, char *args, u64 *token)
{
char *name;
*token = simple_strtoull(args, &name, 16);
if ((name == args) || *name != '^') {
AA_ERROR("%s: Invalid input '%s'", op, args);
return ERR_PTR(-EINVAL);
}
name++; /* skip ^ */
if (!*name)
name = NULL;
return name;
}
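/*
 * Illustrative examples (not part of the upstream file): for the input
 * "3a7f^mysubprofile" the token parses as 0x3a7f and the returned pointer
 * references "mysubprofile"; "3a7f^" alone yields a NULL name, and input
 * without a '^' separator yields ERR_PTR(-EINVAL).
 */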
/**
* aa_setprocattr_changehat - handle procattr interface to change_hat
* @args: args received from writing to /proc/<pid>/attr/current (NOT NULL)
* @size: size of the args
* @flags: set of flags governing behavior
*
* Returns: %0 or error code if change_hat fails
*/
int aa_setprocattr_changehat(char *args, size_t size, int flags)
{
char *hat;
u64 token;
const char *hats[16]; /* current hard limit on # of names */
int count = 0;
hat = split_token_from_name(OP_CHANGE_HAT, args, &token);
if (IS_ERR(hat))
return PTR_ERR(hat);
if (!hat && !token) {
AA_ERROR("change_hat: Invalid input, NULL hat and NULL magic");
return -EINVAL;
}
if (hat) {
/* set up hat name vector, args guaranteed null terminated
* at args[size] by setprocattr.
*
* If there are multiple hat names in the buffer each is
	 * separated by a \0. I.e. userspace writes them pre-tokenized
*/
char *end = args + size;
for (count = 0; (hat < end) && count < 16; ++count) {
char *next = hat + strlen(hat) + 1;
hats[count] = hat;
AA_DEBUG("%s: (pid %d) Magic 0x%llx count %d hat '%s'\n"
, __func__, current->pid, token, count, hat);
hat = next;
}
} else
AA_DEBUG("%s: (pid %d) Magic 0x%llx count %d Hat '%s'\n",
__func__, current->pid, token, count, "<NULL>");
return aa_change_hat(hats, count, token, flags);
}
| linux-master | security/apparmor/procattr.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* AppArmor security module
*
* This file contains AppArmor policy attachment and domain transitions
*
* Copyright (C) 2002-2008 Novell/SUSE
* Copyright 2009-2010 Canonical Ltd.
*/
#include <linux/errno.h>
#include <linux/fdtable.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/mount.h>
#include <linux/syscalls.h>
#include <linux/personality.h>
#include <linux/xattr.h>
#include <linux/user_namespace.h>
#include "include/audit.h"
#include "include/apparmorfs.h"
#include "include/cred.h"
#include "include/domain.h"
#include "include/file.h"
#include "include/ipc.h"
#include "include/match.h"
#include "include/path.h"
#include "include/policy.h"
#include "include/policy_ns.h"
/**
* may_change_ptraced_domain - check if can change profile on ptraced task
* @to_label: profile to change to (NOT NULL)
* @info: message if there is an error
*
* Check if current is ptraced and if so if the tracing task is allowed
* to trace the new domain
*
* Returns: %0 or error if change not allowed
*/
static int may_change_ptraced_domain(struct aa_label *to_label,
const char **info)
{
struct task_struct *tracer;
struct aa_label *tracerl = NULL;
int error = 0;
rcu_read_lock();
tracer = ptrace_parent(current);
if (tracer)
/* released below */
tracerl = aa_get_task_label(tracer);
/* not ptraced */
if (!tracer || unconfined(tracerl))
goto out;
error = aa_may_ptrace(tracerl, to_label, PTRACE_MODE_ATTACH);
out:
rcu_read_unlock();
aa_put_label(tracerl);
if (error)
*info = "ptrace prevents transition";
return error;
}
/**** TODO: dedup to aa_label_match - needs perm and dfa, merging
* specifically this is an exact copy of aa_label_match except
* aa_compute_perms is replaced with aa_compute_fperms
* and policy.dfa with file.dfa
****/
/* match a profile and its associated ns component if needed
* Assumes visibility test has already been done.
* If a subns profile is not to be matched should be prescreened with
* visibility test.
*/
static inline aa_state_t match_component(struct aa_profile *profile,
struct aa_profile *tp,
bool stack, aa_state_t state)
{
struct aa_ruleset *rules = list_first_entry(&profile->rules,
typeof(*rules), list);
const char *ns_name;
if (stack)
state = aa_dfa_match(rules->file.dfa, state, "&");
if (profile->ns == tp->ns)
return aa_dfa_match(rules->file.dfa, state, tp->base.hname);
/* try matching with namespace name and then profile */
ns_name = aa_ns_name(profile->ns, tp->ns, true);
state = aa_dfa_match_len(rules->file.dfa, state, ":", 1);
state = aa_dfa_match(rules->file.dfa, state, ns_name);
state = aa_dfa_match_len(rules->file.dfa, state, ":", 1);
return aa_dfa_match(rules->file.dfa, state, tp->base.hname);
}
/**
* label_compound_match - find perms for full compound label
* @profile: profile to find perms for
* @label: label to check access permissions for
* @stack: whether this is a stacking request
* @state: state to start match in
* @subns: whether to do permission checks on components in a subns
* @request: permissions to request
* @perms: perms struct to set
*
* Returns: 0 on success else ERROR
*
* For the label A//&B//&C this does the perm match for A//&B//&C
* @perms should be preinitialized with allperms OR a previous permission
* check to be stacked.
*/
static int label_compound_match(struct aa_profile *profile,
struct aa_label *label, bool stack,
aa_state_t state, bool subns, u32 request,
struct aa_perms *perms)
{
struct aa_ruleset *rules = list_first_entry(&profile->rules,
typeof(*rules), list);
struct aa_profile *tp;
struct label_it i;
struct path_cond cond = { };
/* find first subcomponent that is visible */
label_for_each(i, label, tp) {
if (!aa_ns_visible(profile->ns, tp->ns, subns))
continue;
state = match_component(profile, tp, stack, state);
if (!state)
goto fail;
goto next;
}
/* no component visible */
*perms = allperms;
return 0;
next:
label_for_each_cont(i, label, tp) {
if (!aa_ns_visible(profile->ns, tp->ns, subns))
continue;
state = aa_dfa_match(rules->file.dfa, state, "//&");
state = match_component(profile, tp, false, state);
if (!state)
goto fail;
}
*perms = *(aa_lookup_fperms(&(rules->file), state, &cond));
aa_apply_modes_to_perms(profile, perms);
if ((perms->allow & request) != request)
return -EACCES;
return 0;
fail:
*perms = nullperms;
return -EACCES;
}
/**
* label_components_match - find perms for all subcomponents of a label
* @profile: profile to find perms for
* @label: label to check access permissions for
* @stack: whether this is a stacking request
* @start: state to start match in
* @subns: whether to do permission checks on components in a subns
* @request: permissions to request
* @perms: an initialized perms struct to add accumulation to
*
* Returns: 0 on success else ERROR
*
* For the label A//&B//&C this does the perm match for each of A and B and C
* @perms should be preinitialized with allperms OR a previous permission
* check to be stacked.
*/
static int label_components_match(struct aa_profile *profile,
struct aa_label *label, bool stack,
aa_state_t start, bool subns, u32 request,
struct aa_perms *perms)
{
struct aa_ruleset *rules = list_first_entry(&profile->rules,
typeof(*rules), list);
struct aa_profile *tp;
struct label_it i;
struct aa_perms tmp;
struct path_cond cond = { };
aa_state_t state = 0;
/* find first subcomponent to test */
label_for_each(i, label, tp) {
if (!aa_ns_visible(profile->ns, tp->ns, subns))
continue;
state = match_component(profile, tp, stack, start);
if (!state)
goto fail;
goto next;
}
/* no subcomponents visible - no change in perms */
return 0;
next:
tmp = *(aa_lookup_fperms(&(rules->file), state, &cond));
aa_apply_modes_to_perms(profile, &tmp);
aa_perms_accum(perms, &tmp);
label_for_each_cont(i, label, tp) {
if (!aa_ns_visible(profile->ns, tp->ns, subns))
continue;
state = match_component(profile, tp, stack, start);
if (!state)
goto fail;
tmp = *(aa_lookup_fperms(&(rules->file), state, &cond));
aa_apply_modes_to_perms(profile, &tmp);
aa_perms_accum(perms, &tmp);
}
if ((perms->allow & request) != request)
return -EACCES;
return 0;
fail:
*perms = nullperms;
return -EACCES;
}
/**
* label_match - do a multi-component label match
* @profile: profile to match against (NOT NULL)
* @label: label to match (NOT NULL)
* @stack: whether this is a stacking request
* @state: state to start in
* @subns: whether to match subns components
* @request: permission request
* @perms: Returns computed perms (NOT NULL)
*
 * Returns: %0 on success else -EACCES
*/
static int label_match(struct aa_profile *profile, struct aa_label *label,
bool stack, aa_state_t state, bool subns, u32 request,
struct aa_perms *perms)
{
int error;
*perms = nullperms;
error = label_compound_match(profile, label, stack, state, subns,
request, perms);
if (!error)
return error;
*perms = allperms;
return label_components_match(profile, label, stack, state, subns,
request, perms);
}
/******* end TODO: dedup *****/
/**
* change_profile_perms - find permissions for change_profile
* @profile: the current profile (NOT NULL)
* @target: label to transition to (NOT NULL)
* @stack: whether this is a stacking request
* @request: requested perms
* @start: state to start matching in
*
*
* Returns: permission set
*
* currently only matches full label A//&B//&C or individual components A, B, C
 * not arbitrary combinations. E.g. A//&B, C
*/
static int change_profile_perms(struct aa_profile *profile,
struct aa_label *target, bool stack,
u32 request, aa_state_t start,
struct aa_perms *perms)
{
if (profile_unconfined(profile)) {
perms->allow = AA_MAY_CHANGE_PROFILE | AA_MAY_ONEXEC;
perms->audit = perms->quiet = perms->kill = 0;
return 0;
}
/* TODO: add profile in ns screening */
return label_match(profile, target, stack, start, true, request, perms);
}
/**
* aa_xattrs_match - check whether a file matches the xattrs defined in profile
* @bprm: binprm struct for the process to validate
* @profile: profile to match against (NOT NULL)
* @state: state to start match in
*
* Returns: number of extended attributes that matched, or < 0 on error
*/
static int aa_xattrs_match(const struct linux_binprm *bprm,
struct aa_profile *profile, aa_state_t state)
{
int i;
struct dentry *d;
char *value = NULL;
struct aa_attachment *attach = &profile->attach;
int size, value_size = 0, ret = attach->xattr_count;
if (!bprm || !attach->xattr_count)
return 0;
might_sleep();
/* transition from exec match to xattr set */
state = aa_dfa_outofband_transition(attach->xmatch.dfa, state);
d = bprm->file->f_path.dentry;
for (i = 0; i < attach->xattr_count; i++) {
size = vfs_getxattr_alloc(&nop_mnt_idmap, d, attach->xattrs[i],
&value, value_size, GFP_KERNEL);
if (size >= 0) {
u32 index, perm;
/*
* Check the xattr presence before value. This ensure
* that not present xattr can be distinguished from a 0
* length value or rule that matches any value
*/
state = aa_dfa_null_transition(attach->xmatch.dfa,
state);
/* Check xattr value */
state = aa_dfa_match_len(attach->xmatch.dfa, state,
value, size);
index = ACCEPT_TABLE(attach->xmatch.dfa)[state];
perm = attach->xmatch.perms[index].allow;
if (!(perm & MAY_EXEC)) {
ret = -EINVAL;
goto out;
}
}
/* transition to next element */
state = aa_dfa_outofband_transition(attach->xmatch.dfa, state);
if (size < 0) {
/*
* No xattr match, so verify if transition to
* next element was valid. IFF so the xattr
* was optional.
*/
if (!state) {
ret = -EINVAL;
goto out;
}
/* don't count missing optional xattr as matched */
ret--;
}
}
out:
kfree(value);
return ret;
}
/**
* find_attach - do attachment search for unconfined processes
 * @bprm: binprm structure of transitioning task
 * @ns: the current namespace (NOT NULL)
 * @head: profile list to walk (NOT NULL)
 * @name: to match against (NOT NULL)
 * @info: info message if there was an error (NOT NULL)
*
* Do a linear search on the profiles in the list. There is a matching
* preference where an exact match is preferred over a name which uses
* expressions to match, and matching expressions with the greatest
* xmatch_len are preferred.
*
* Requires: @head not be shared or have appropriate locks held
*
* Returns: label or NULL if no match found
*/
static struct aa_label *find_attach(const struct linux_binprm *bprm,
struct aa_ns *ns, struct list_head *head,
const char *name, const char **info)
{
int candidate_len = 0, candidate_xattrs = 0;
bool conflict = false;
struct aa_profile *profile, *candidate = NULL;
AA_BUG(!name);
AA_BUG(!head);
rcu_read_lock();
restart:
list_for_each_entry_rcu(profile, head, base.list) {
struct aa_attachment *attach = &profile->attach;
if (profile->label.flags & FLAG_NULL &&
&profile->label == ns_unconfined(profile->ns))
continue;
/* Find the "best" matching profile. Profiles must
* match the path and extended attributes (if any)
* associated with the file. A more specific path
* match will be preferred over a less specific one,
* and a match with more matching extended attributes
* will be preferred over one with fewer. If the best
* match has both the same level of path specificity
* and the same number of matching extended attributes
* as another profile, signal a conflict and refuse to
* match.
*/
if (attach->xmatch.dfa) {
unsigned int count;
aa_state_t state;
u32 index, perm;
state = aa_dfa_leftmatch(attach->xmatch.dfa,
attach->xmatch.start[AA_CLASS_XMATCH],
name, &count);
index = ACCEPT_TABLE(attach->xmatch.dfa)[state];
perm = attach->xmatch.perms[index].allow;
/* any accepting state means a valid match. */
if (perm & MAY_EXEC) {
int ret = 0;
if (count < candidate_len)
continue;
if (bprm && attach->xattr_count) {
long rev = READ_ONCE(ns->revision);
if (!aa_get_profile_not0(profile))
goto restart;
rcu_read_unlock();
ret = aa_xattrs_match(bprm, profile,
state);
rcu_read_lock();
aa_put_profile(profile);
if (rev !=
READ_ONCE(ns->revision))
/* policy changed */
goto restart;
/*
* Fail matching if the xattrs don't
* match
*/
if (ret < 0)
continue;
}
/*
* TODO: allow for more flexible best match
*
* The new match isn't more specific
* than the current best match
*/
if (count == candidate_len &&
ret <= candidate_xattrs) {
/* Match is equivalent, so conflict */
if (ret == candidate_xattrs)
conflict = true;
continue;
}
/* Either the same length with more matching
* xattrs, or a longer match
*/
candidate = profile;
candidate_len = max(count, attach->xmatch_len);
candidate_xattrs = ret;
conflict = false;
}
} else if (!strcmp(profile->base.name, name)) {
/*
* old exact non-re match, without conditionals such
* as xattrs. no more searching required
*/
candidate = profile;
goto out;
}
}
if (!candidate || conflict) {
if (conflict)
*info = "conflicting profile attachments";
rcu_read_unlock();
return NULL;
}
out:
candidate = aa_get_newest_profile(candidate);
rcu_read_unlock();
return &candidate->label;
}
static const char *next_name(int xtype, const char *name)
{
return NULL;
}
/**
* x_table_lookup - lookup an x transition name via transition table
* @profile: current profile (NOT NULL)
* @xindex: index into x transition table
* @name: returns: name tested to find label (NOT NULL)
*
* Returns: refcounted label, or NULL on failure (MAYBE NULL)
*/
struct aa_label *x_table_lookup(struct aa_profile *profile, u32 xindex,
const char **name)
{
struct aa_ruleset *rules = list_first_entry(&profile->rules,
typeof(*rules), list);
struct aa_label *label = NULL;
u32 xtype = xindex & AA_X_TYPE_MASK;
int index = xindex & AA_X_INDEX_MASK;
AA_BUG(!name);
/* index is guaranteed to be in range, validated at load time */
/* TODO: move lookup parsing to unpack time so this is a straight
* index into the resultant label
*/
for (*name = rules->file.trans.table[index]; !label && *name;
*name = next_name(xtype, *name)) {
if (xindex & AA_X_CHILD) {
struct aa_profile *new_profile;
/* release by caller */
new_profile = aa_find_child(profile, *name);
if (new_profile)
label = &new_profile->label;
continue;
}
label = aa_label_parse(&profile->label, *name, GFP_KERNEL,
true, false);
if (IS_ERR(label))
label = NULL;
}
/* released by caller */
return label;
}
/**
* x_to_label - get target label for a given xindex
* @profile: current profile (NOT NULL)
* @bprm: binprm structure of transitioning task
* @name: name to lookup (NOT NULL)
* @xindex: index into x transition table
* @lookupname: returns: name used in lookup if one was specified (NOT NULL)
*
* find label for a transition index
*
 * Returns: refcounted label, or NULL if not found or not available
*/
static struct aa_label *x_to_label(struct aa_profile *profile,
const struct linux_binprm *bprm,
const char *name, u32 xindex,
const char **lookupname,
const char **info)
{
struct aa_ruleset *rules = list_first_entry(&profile->rules,
typeof(*rules), list);
struct aa_label *new = NULL;
struct aa_ns *ns = profile->ns;
u32 xtype = xindex & AA_X_TYPE_MASK;
const char *stack = NULL;
switch (xtype) {
case AA_X_NONE:
/* fail exec unless ix || ux fallback - handled by caller */
*lookupname = NULL;
break;
case AA_X_TABLE:
/* TODO: fix when perm mapping done at unload */
stack = rules->file.trans.table[xindex & AA_X_INDEX_MASK];
if (*stack != '&') {
/* released by caller */
new = x_table_lookup(profile, xindex, lookupname);
stack = NULL;
break;
}
fallthrough; /* to X_NAME */
case AA_X_NAME:
if (xindex & AA_X_CHILD)
/* released by caller */
new = find_attach(bprm, ns, &profile->base.profiles,
name, info);
else
/* released by caller */
new = find_attach(bprm, ns, &ns->base.profiles,
name, info);
*lookupname = name;
break;
}
if (!new) {
if (xindex & AA_X_INHERIT) {
/* (p|c|n)ix - don't change profile but do
* use the newest version
*/
*info = "ix fallback";
/* no profile && no error */
new = aa_get_newest_label(&profile->label);
} else if (xindex & AA_X_UNCONFINED) {
new = aa_get_newest_label(ns_unconfined(profile->ns));
*info = "ux fallback";
}
}
if (new && stack) {
/* base the stack on post domain transition */
struct aa_label *base = new;
new = aa_label_parse(base, stack, GFP_KERNEL, true, false);
if (IS_ERR(new))
new = NULL;
aa_put_label(base);
}
/* released by caller */
return new;
}
static struct aa_label *profile_transition(struct aa_profile *profile,
const struct linux_binprm *bprm,
char *buffer, struct path_cond *cond,
bool *secure_exec)
{
struct aa_ruleset *rules = list_first_entry(&profile->rules,
typeof(*rules), list);
struct aa_label *new = NULL;
const char *info = NULL, *name = NULL, *target = NULL;
aa_state_t state = rules->file.start[AA_CLASS_FILE];
struct aa_perms perms = {};
bool nonewprivs = false;
int error = 0;
AA_BUG(!profile);
AA_BUG(!bprm);
AA_BUG(!buffer);
error = aa_path_name(&bprm->file->f_path, profile->path_flags, buffer,
&name, &info, profile->disconnected);
if (error) {
if (profile_unconfined(profile) ||
(profile->label.flags & FLAG_IX_ON_NAME_ERROR)) {
AA_DEBUG("name lookup ix on error");
error = 0;
new = aa_get_newest_label(&profile->label);
}
name = bprm->filename;
goto audit;
}
if (profile_unconfined(profile)) {
new = find_attach(bprm, profile->ns,
&profile->ns->base.profiles, name, &info);
if (new) {
AA_DEBUG("unconfined attached to new label");
return new;
}
AA_DEBUG("unconfined exec no attachment");
return aa_get_newest_label(&profile->label);
}
/* find exec permissions for name */
state = aa_str_perms(&(rules->file), state, name, cond, &perms);
if (perms.allow & MAY_EXEC) {
/* exec permission determine how to transition */
new = x_to_label(profile, bprm, name, perms.xindex, &target,
&info);
if (new && new->proxy == profile->label.proxy && info) {
/* hack ix fallback - improve how this is detected */
goto audit;
} else if (!new) {
error = -EACCES;
info = "profile transition not found";
/* remove MAY_EXEC to audit as failure */
perms.allow &= ~MAY_EXEC;
}
} else if (COMPLAIN_MODE(profile)) {
/* no exec permission - learning mode */
struct aa_profile *new_profile = NULL;
new_profile = aa_new_learning_profile(profile, false, name,
GFP_KERNEL);
if (!new_profile) {
error = -ENOMEM;
info = "could not create null profile";
} else {
error = -EACCES;
new = &new_profile->label;
}
perms.xindex |= AA_X_UNSAFE;
} else
/* fail exec */
error = -EACCES;
if (!new)
goto audit;
if (!(perms.xindex & AA_X_UNSAFE)) {
if (DEBUG_ON) {
dbg_printk("apparmor: scrubbing environment variables"
" for %s profile=", name);
aa_label_printk(new, GFP_KERNEL);
dbg_printk("\n");
}
*secure_exec = true;
}
audit:
aa_audit_file(profile, &perms, OP_EXEC, MAY_EXEC, name, target, new,
cond->uid, info, error);
if (!new || nonewprivs) {
aa_put_label(new);
return ERR_PTR(error);
}
return new;
}
static int profile_onexec(struct aa_profile *profile, struct aa_label *onexec,
bool stack, const struct linux_binprm *bprm,
char *buffer, struct path_cond *cond,
bool *secure_exec)
{
struct aa_ruleset *rules = list_first_entry(&profile->rules,
typeof(*rules), list);
aa_state_t state = rules->file.start[AA_CLASS_FILE];
struct aa_perms perms = {};
const char *xname = NULL, *info = "change_profile onexec";
int error = -EACCES;
AA_BUG(!profile);
AA_BUG(!onexec);
AA_BUG(!bprm);
AA_BUG(!buffer);
if (profile_unconfined(profile)) {
/* change_profile on exec already granted */
/*
* NOTE: Domain transitions from unconfined are allowed
		 * even when no_new_privs is set because this always results
* in a further reduction of permissions.
*/
return 0;
}
error = aa_path_name(&bprm->file->f_path, profile->path_flags, buffer,
&xname, &info, profile->disconnected);
if (error) {
if (profile_unconfined(profile) ||
(profile->label.flags & FLAG_IX_ON_NAME_ERROR)) {
AA_DEBUG("name lookup ix on error");
error = 0;
}
xname = bprm->filename;
goto audit;
}
/* find exec permissions for name */
state = aa_str_perms(&(rules->file), state, xname, cond, &perms);
if (!(perms.allow & AA_MAY_ONEXEC)) {
info = "no change_onexec valid for executable";
goto audit;
}
/* test if this exec can be paired with change_profile onexec.
* onexec permission is linked to exec with a standard pairing
* exec\0change_profile
*/
state = aa_dfa_null_transition(rules->file.dfa, state);
error = change_profile_perms(profile, onexec, stack, AA_MAY_ONEXEC,
state, &perms);
if (error) {
perms.allow &= ~AA_MAY_ONEXEC;
goto audit;
}
if (!(perms.xindex & AA_X_UNSAFE)) {
if (DEBUG_ON) {
dbg_printk("apparmor: scrubbing environment "
"variables for %s label=", xname);
aa_label_printk(onexec, GFP_KERNEL);
dbg_printk("\n");
}
*secure_exec = true;
}
audit:
return aa_audit_file(profile, &perms, OP_EXEC, AA_MAY_ONEXEC, xname,
NULL, onexec, cond->uid, info, error);
}
/* ensure none ns domain transitions are correctly applied with onexec */
static struct aa_label *handle_onexec(struct aa_label *label,
struct aa_label *onexec, bool stack,
const struct linux_binprm *bprm,
char *buffer, struct path_cond *cond,
bool *unsafe)
{
struct aa_profile *profile;
struct aa_label *new;
int error;
AA_BUG(!label);
AA_BUG(!onexec);
AA_BUG(!bprm);
AA_BUG(!buffer);
if (!stack) {
error = fn_for_each_in_ns(label, profile,
profile_onexec(profile, onexec, stack,
bprm, buffer, cond, unsafe));
if (error)
return ERR_PTR(error);
new = fn_label_build_in_ns(label, profile, GFP_KERNEL,
aa_get_newest_label(onexec),
profile_transition(profile, bprm, buffer,
cond, unsafe));
} else {
/* TODO: determine how much we want to loosen this */
error = fn_for_each_in_ns(label, profile,
profile_onexec(profile, onexec, stack, bprm,
buffer, cond, unsafe));
if (error)
return ERR_PTR(error);
new = fn_label_build_in_ns(label, profile, GFP_KERNEL,
aa_label_merge(&profile->label, onexec,
GFP_KERNEL),
profile_transition(profile, bprm, buffer,
cond, unsafe));
}
if (new)
return new;
/* TODO: get rid of GLOBAL_ROOT_UID */
error = fn_for_each_in_ns(label, profile,
aa_audit_file(profile, &nullperms, OP_CHANGE_ONEXEC,
AA_MAY_ONEXEC, bprm->filename, NULL,
onexec, GLOBAL_ROOT_UID,
"failed to build target label", -ENOMEM));
return ERR_PTR(error);
}
/**
* apparmor_bprm_creds_for_exec - Update the new creds on the bprm struct
* @bprm: binprm for the exec (NOT NULL)
*
* Returns: %0 or error on failure
*
* TODO: once the other paths are done see if we can't refactor into a fn
*/
int apparmor_bprm_creds_for_exec(struct linux_binprm *bprm)
{
struct aa_task_ctx *ctx;
struct aa_label *label, *new = NULL;
struct aa_profile *profile;
char *buffer = NULL;
const char *info = NULL;
int error = 0;
bool unsafe = false;
vfsuid_t vfsuid = i_uid_into_vfsuid(file_mnt_idmap(bprm->file),
file_inode(bprm->file));
struct path_cond cond = {
vfsuid_into_kuid(vfsuid),
file_inode(bprm->file)->i_mode
};
ctx = task_ctx(current);
AA_BUG(!cred_label(bprm->cred));
AA_BUG(!ctx);
label = aa_get_newest_label(cred_label(bprm->cred));
/*
* Detect no new privs being set, and store the label it
* occurred under. Ideally this would happen when nnp
* is set but there isn't a good way to do that yet.
*
* Testing for unconfined must be done before the subset test
*/
if ((bprm->unsafe & LSM_UNSAFE_NO_NEW_PRIVS) && !unconfined(label) &&
!ctx->nnp)
ctx->nnp = aa_get_label(label);
/* buffer freed below, name is pointer into buffer */
buffer = aa_get_buffer(false);
if (!buffer) {
error = -ENOMEM;
goto done;
}
/* Test for onexec first as onexec override other x transitions. */
if (ctx->onexec)
new = handle_onexec(label, ctx->onexec, ctx->token,
bprm, buffer, &cond, &unsafe);
else
new = fn_label_build(label, profile, GFP_KERNEL,
profile_transition(profile, bprm, buffer,
&cond, &unsafe));
AA_BUG(!new);
if (IS_ERR(new)) {
error = PTR_ERR(new);
goto done;
} else if (!new) {
error = -ENOMEM;
goto done;
}
	/* Policy has specified a domain transition. If no_new_privs and
* confined ensure the transition is to confinement that is subset
* of the confinement when the task entered no new privs.
*
* NOTE: Domain transitions from unconfined and to stacked
* subsets are allowed even when no_new_privs is set because this
	 * always results in a further reduction of permissions.
*/
if ((bprm->unsafe & LSM_UNSAFE_NO_NEW_PRIVS) &&
!unconfined(label) &&
!aa_label_is_unconfined_subset(new, ctx->nnp)) {
error = -EPERM;
info = "no new privs";
goto audit;
}
if (bprm->unsafe & LSM_UNSAFE_SHARE) {
/* FIXME: currently don't mediate shared state */
;
}
if (bprm->unsafe & (LSM_UNSAFE_PTRACE)) {
/* TODO: test needs to be profile of label to new */
error = may_change_ptraced_domain(new, &info);
if (error)
goto audit;
}
if (unsafe) {
if (DEBUG_ON) {
dbg_printk("scrubbing environment variables for %s "
"label=", bprm->filename);
aa_label_printk(new, GFP_KERNEL);
dbg_printk("\n");
}
bprm->secureexec = 1;
}
if (label->proxy != new->proxy) {
/* when transitioning clear unsafe personality bits */
if (DEBUG_ON) {
dbg_printk("apparmor: clearing unsafe personality "
"bits. %s label=", bprm->filename);
aa_label_printk(new, GFP_KERNEL);
dbg_printk("\n");
}
bprm->per_clear |= PER_CLEAR_ON_SETID;
}
aa_put_label(cred_label(bprm->cred));
/* transfer reference, released when cred is freed */
set_cred_label(bprm->cred, new);
done:
aa_put_label(label);
aa_put_buffer(buffer);
return error;
audit:
error = fn_for_each(label, profile,
aa_audit_file(profile, &nullperms, OP_EXEC, MAY_EXEC,
bprm->filename, NULL, new,
vfsuid_into_kuid(vfsuid), info, error));
aa_put_label(new);
goto done;
}
/*
* Functions for self directed profile change
*/
/* helper fn for change_hat
*
* Returns: label for hat transition OR ERR_PTR. Does NOT return NULL
*/
static struct aa_label *build_change_hat(struct aa_profile *profile,
const char *name, bool sibling)
{
struct aa_profile *root, *hat = NULL;
const char *info = NULL;
int error = 0;
if (sibling && PROFILE_IS_HAT(profile)) {
root = aa_get_profile_rcu(&profile->parent);
} else if (!sibling && !PROFILE_IS_HAT(profile)) {
root = aa_get_profile(profile);
} else {
info = "conflicting target types";
error = -EPERM;
goto audit;
}
hat = aa_find_child(root, name);
if (!hat) {
error = -ENOENT;
if (COMPLAIN_MODE(profile)) {
hat = aa_new_learning_profile(profile, true, name,
GFP_KERNEL);
if (!hat) {
info = "failed null profile create";
error = -ENOMEM;
}
}
}
aa_put_profile(root);
audit:
aa_audit_file(profile, &nullperms, OP_CHANGE_HAT, AA_MAY_CHANGEHAT,
name, hat ? hat->base.hname : NULL,
hat ? &hat->label : NULL, GLOBAL_ROOT_UID, info,
error);
if (!hat || (error && error != -ENOENT))
return ERR_PTR(error);
/* if hat && error - complain mode, already audited and we adjust for
* complain mode allow by returning hat->label
*/
return &hat->label;
}
/* helper fn for changing into a hat
*
* Returns: label for hat transition or ERR_PTR. Does not return NULL
*/
static struct aa_label *change_hat(struct aa_label *label, const char *hats[],
int count, int flags)
{
struct aa_profile *profile, *root, *hat = NULL;
struct aa_label *new;
struct label_it it;
bool sibling = false;
const char *name, *info = NULL;
int i, error;
AA_BUG(!label);
AA_BUG(!hats);
AA_BUG(count < 1);
if (PROFILE_IS_HAT(labels_profile(label)))
sibling = true;
	/* find first matching hat */
for (i = 0; i < count && !hat; i++) {
name = hats[i];
label_for_each_in_ns(it, labels_ns(label), label, profile) {
if (sibling && PROFILE_IS_HAT(profile)) {
root = aa_get_profile_rcu(&profile->parent);
} else if (!sibling && !PROFILE_IS_HAT(profile)) {
root = aa_get_profile(profile);
} else { /* conflicting change type */
info = "conflicting targets types";
error = -EPERM;
goto fail;
}
hat = aa_find_child(root, name);
aa_put_profile(root);
if (!hat) {
if (!COMPLAIN_MODE(profile))
goto outer_continue;
/* complain mode succeed as if hat */
} else if (!PROFILE_IS_HAT(hat)) {
info = "target not hat";
error = -EPERM;
aa_put_profile(hat);
goto fail;
}
aa_put_profile(hat);
}
/* found a hat for all profiles in ns */
goto build;
outer_continue:
;
}
/* no hats that match, find appropriate error
*
* In complain mode audit of the failure is based off of the first
* hat supplied. This is done due how userspace interacts with
* change_hat.
*/
name = NULL;
label_for_each_in_ns(it, labels_ns(label), label, profile) {
if (!list_empty(&profile->base.profiles)) {
info = "hat not found";
error = -ENOENT;
goto fail;
}
}
info = "no hats defined";
error = -ECHILD;
fail:
label_for_each_in_ns(it, labels_ns(label), label, profile) {
/*
* no target as it has failed to be found or built
*
* change_hat uses probing and should not log failures
* related to missing hats
*/
/* TODO: get rid of GLOBAL_ROOT_UID */
if (count > 1 || COMPLAIN_MODE(profile)) {
aa_audit_file(profile, &nullperms, OP_CHANGE_HAT,
AA_MAY_CHANGEHAT, name, NULL, NULL,
GLOBAL_ROOT_UID, info, error);
}
}
return ERR_PTR(error);
build:
new = fn_label_build_in_ns(label, profile, GFP_KERNEL,
build_change_hat(profile, name, sibling),
aa_get_label(&profile->label));
if (!new) {
info = "label build failed";
error = -ENOMEM;
goto fail;
} /* else if (IS_ERR) build_change_hat has logged error so return new */
return new;
}
/**
* aa_change_hat - change hat to/from subprofile
* @hats: vector of hat names to try changing into (MAYBE NULL if @count == 0)
* @count: number of hat names in @hats
* @token: magic value to validate the hat change
* @flags: flags affecting behavior of the change
*
* Returns %0 on success, error otherwise.
*
* Change to the first profile specified in @hats that exists, and store
* the @hat_magic in the current task context. If the count == 0 and the
* @token matches that stored in the current task context, return to the
* top level profile.
*
* change_hat only applies to profiles in the current ns, and each profile
* in the ns must make the same transition otherwise change_hat will fail.
*/
int aa_change_hat(const char *hats[], int count, u64 token, int flags)
{
const struct cred *cred;
struct aa_task_ctx *ctx = task_ctx(current);
struct aa_label *label, *previous, *new = NULL, *target = NULL;
struct aa_profile *profile;
struct aa_perms perms = {};
const char *info = NULL;
int error = 0;
/* released below */
cred = get_current_cred();
label = aa_get_newest_cred_label(cred);
previous = aa_get_newest_label(ctx->previous);
/*
* Detect no new privs being set, and store the label it
* occurred under. Ideally this would happen when nnp
* is set but there isn't a good way to do that yet.
*
* Testing for unconfined must be done before the subset test
*/
if (task_no_new_privs(current) && !unconfined(label) && !ctx->nnp)
ctx->nnp = aa_get_label(label);
if (unconfined(label)) {
info = "unconfined can not change_hat";
error = -EPERM;
goto fail;
}
if (count) {
new = change_hat(label, hats, count, flags);
AA_BUG(!new);
if (IS_ERR(new)) {
error = PTR_ERR(new);
new = NULL;
/* already audited */
goto out;
}
error = may_change_ptraced_domain(new, &info);
if (error)
goto fail;
/*
* no new privs prevents domain transitions that would
* reduce restrictions.
*/
if (task_no_new_privs(current) && !unconfined(label) &&
!aa_label_is_unconfined_subset(new, ctx->nnp)) {
/* not an apparmor denial per se, so don't log it */
AA_DEBUG("no_new_privs - change_hat denied");
error = -EPERM;
goto out;
}
if (flags & AA_CHANGE_TEST)
goto out;
target = new;
error = aa_set_current_hat(new, token);
if (error == -EACCES)
/* kill task in case of brute force attacks */
goto kill;
} else if (previous && !(flags & AA_CHANGE_TEST)) {
/*
* no new privs prevents domain transitions that would
* reduce restrictions.
*/
if (task_no_new_privs(current) && !unconfined(label) &&
!aa_label_is_unconfined_subset(previous, ctx->nnp)) {
/* not an apparmor denial per se, so don't log it */
AA_DEBUG("no_new_privs - change_hat denied");
error = -EPERM;
goto out;
}
/* Return to saved label. Kill task if restore fails
* to avoid brute force attacks
*/
target = previous;
error = aa_restore_previous_label(token);
if (error) {
if (error == -EACCES)
goto kill;
goto fail;
}
} /* else ignore @flags && restores when there is no saved profile */
out:
aa_put_label(new);
aa_put_label(previous);
aa_put_label(label);
put_cred(cred);
return error;
kill:
info = "failed token match";
perms.kill = AA_MAY_CHANGEHAT;
fail:
fn_for_each_in_ns(label, profile,
aa_audit_file(profile, &perms, OP_CHANGE_HAT,
AA_MAY_CHANGEHAT, NULL, NULL, target,
GLOBAL_ROOT_UID, info, error));
goto out;
}
static int change_profile_perms_wrapper(const char *op, const char *name,
struct aa_profile *profile,
struct aa_label *target, bool stack,
u32 request, struct aa_perms *perms)
{
struct aa_ruleset *rules = list_first_entry(&profile->rules,
typeof(*rules), list);
const char *info = NULL;
int error = 0;
if (!error)
error = change_profile_perms(profile, target, stack, request,
rules->file.start[AA_CLASS_FILE],
perms);
if (error)
error = aa_audit_file(profile, perms, op, request, name,
NULL, target, GLOBAL_ROOT_UID, info,
error);
return error;
}
/**
* aa_change_profile - perform a one-way profile transition
* @fqname: name of profile may include namespace (NOT NULL)
* @flags: flags affecting change behavior
*
* Change to new profile @fqname. Unlike with hats, there is no way
* to change back. If @fqname isn't specified the current profile name is
* used.
* If the AA_CHANGE_ONEXEC flag is set in @flags, the transition is
* delayed until the next exec.
*
* Returns %0 on success, error otherwise.
*/
int aa_change_profile(const char *fqname, int flags)
{
struct aa_label *label, *new = NULL, *target = NULL;
struct aa_profile *profile;
struct aa_perms perms = {};
const char *info = NULL;
const char *auditname = fqname; /* retain leading & if stack */
bool stack = flags & AA_CHANGE_STACK;
struct aa_task_ctx *ctx = task_ctx(current);
int error = 0;
char *op;
u32 request;
label = aa_get_current_label();
/*
* Detect no new privs being set, and store the label it
* occurred under. Ideally this would happen when nnp
* is set but there isn't a good way to do that yet.
*
* Testing for unconfined must be done before the subset test
*/
if (task_no_new_privs(current) && !unconfined(label) && !ctx->nnp)
ctx->nnp = aa_get_label(label);
if (!fqname || !*fqname) {
aa_put_label(label);
AA_DEBUG("no profile name");
return -EINVAL;
}
if (flags & AA_CHANGE_ONEXEC) {
request = AA_MAY_ONEXEC;
if (stack)
op = OP_STACK_ONEXEC;
else
op = OP_CHANGE_ONEXEC;
} else {
request = AA_MAY_CHANGE_PROFILE;
if (stack)
op = OP_STACK;
else
op = OP_CHANGE_PROFILE;
}
if (*fqname == '&') {
stack = true;
/* don't have label_parse() do stacking */
fqname++;
}
target = aa_label_parse(label, fqname, GFP_KERNEL, true, false);
if (IS_ERR(target)) {
struct aa_profile *tprofile;
info = "label not found";
error = PTR_ERR(target);
target = NULL;
/*
* TODO: fixme using labels_profile is not right - do profile
* per complain profile
*/
if ((flags & AA_CHANGE_TEST) ||
!COMPLAIN_MODE(labels_profile(label)))
goto audit;
/* released below */
tprofile = aa_new_learning_profile(labels_profile(label), false,
fqname, GFP_KERNEL);
if (!tprofile) {
info = "failed null profile create";
error = -ENOMEM;
goto audit;
}
target = &tprofile->label;
goto check;
}
/*
* self directed transitions only apply to current policy ns
* TODO: currently requiring perms for stacking and straight change
* stacking doesn't strictly need this. Determine how much
* we want to loosen this restriction for stacking
*
* if (!stack) {
*/
error = fn_for_each_in_ns(label, profile,
change_profile_perms_wrapper(op, auditname,
profile, target, stack,
request, &perms));
if (error)
/* auditing done in change_profile_perms_wrapper */
goto out;
/* } */
check:
/* check if tracing task is allowed to trace target domain */
error = may_change_ptraced_domain(target, &info);
if (error && !fn_for_each_in_ns(label, profile,
COMPLAIN_MODE(profile)))
goto audit;
/* TODO: add permission check to allow this
* if ((flags & AA_CHANGE_ONEXEC) && !current_is_single_threaded()) {
* info = "not a single threaded task";
* error = -EACCES;
* goto audit;
* }
*/
if (flags & AA_CHANGE_TEST)
goto out;
/* stacking is always a subset, so only check the nonstack case */
if (!stack) {
new = fn_label_build_in_ns(label, profile, GFP_KERNEL,
aa_get_label(target),
aa_get_label(&profile->label));
/*
* no new privs prevents domain transitions that would
* reduce restrictions.
*/
if (task_no_new_privs(current) && !unconfined(label) &&
!aa_label_is_unconfined_subset(new, ctx->nnp)) {
/* not an apparmor denial per se, so don't log it */
AA_DEBUG("no_new_privs - change_hat denied");
error = -EPERM;
goto out;
}
}
if (!(flags & AA_CHANGE_ONEXEC)) {
/* only transition profiles in the current ns */
if (stack)
new = aa_label_merge(label, target, GFP_KERNEL);
if (IS_ERR_OR_NULL(new)) {
info = "failed to build target label";
if (!new)
error = -ENOMEM;
else
error = PTR_ERR(new);
new = NULL;
perms.allow = 0;
goto audit;
}
error = aa_replace_current_label(new);
} else {
if (new) {
aa_put_label(new);
new = NULL;
}
/* full transition will be built in exec path */
error = aa_set_current_onexec(target, stack);
}
audit:
error = fn_for_each_in_ns(label, profile,
aa_audit_file(profile, &perms, op, request, auditname,
NULL, new ? new : target,
GLOBAL_ROOT_UID, info, error));
out:
aa_put_label(new);
aa_put_label(target);
aa_put_label(label);
return error;
}
| linux-master | security/apparmor/domain.c |
// SPDX-License-Identifier: GPL-2.0
/*
* security/tomoyo/group.c
*
* Copyright (C) 2005-2011 NTT DATA CORPORATION
*/
#include <linux/slab.h>
#include <linux/rculist.h>
#include "common.h"
/**
* tomoyo_same_path_group - Check for duplicated "struct tomoyo_path_group" entry.
*
* @a: Pointer to "struct tomoyo_acl_head".
* @b: Pointer to "struct tomoyo_acl_head".
*
* Returns true if @a == @b, false otherwise.
*/
static bool tomoyo_same_path_group(const struct tomoyo_acl_head *a,
const struct tomoyo_acl_head *b)
{
return container_of(a, struct tomoyo_path_group, head)->member_name ==
container_of(b, struct tomoyo_path_group, head)->member_name;
}
/**
* tomoyo_same_number_group - Check for duplicated "struct tomoyo_number_group" entry.
*
* @a: Pointer to "struct tomoyo_acl_head".
* @b: Pointer to "struct tomoyo_acl_head".
*
* Returns true if @a == @b, false otherwise.
*/
static bool tomoyo_same_number_group(const struct tomoyo_acl_head *a,
const struct tomoyo_acl_head *b)
{
return !memcmp(&container_of(a, struct tomoyo_number_group, head)
->number,
&container_of(b, struct tomoyo_number_group, head)
->number,
sizeof(container_of(a, struct tomoyo_number_group, head)
->number));
}
/**
* tomoyo_same_address_group - Check for duplicated "struct tomoyo_address_group" entry.
*
* @a: Pointer to "struct tomoyo_acl_head".
* @b: Pointer to "struct tomoyo_acl_head".
*
* Returns true if @a == @b, false otherwise.
*/
static bool tomoyo_same_address_group(const struct tomoyo_acl_head *a,
const struct tomoyo_acl_head *b)
{
const struct tomoyo_address_group *p1 = container_of(a, typeof(*p1),
head);
const struct tomoyo_address_group *p2 = container_of(b, typeof(*p2),
head);
return tomoyo_same_ipaddr_union(&p1->address, &p2->address);
}
/**
* tomoyo_write_group - Write "struct tomoyo_path_group"/"struct tomoyo_number_group"/"struct tomoyo_address_group" list.
*
* @param: Pointer to "struct tomoyo_acl_param".
* @type: Type of this group.
*
* Returns 0 on success, negative value otherwise.
*/
int tomoyo_write_group(struct tomoyo_acl_param *param, const u8 type)
{
struct tomoyo_group *group = tomoyo_get_group(param, type);
int error = -EINVAL;
if (!group)
return -ENOMEM;
param->list = &group->member_list;
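/*
* Example (sketch of the expected member syntax): for a line such as
* "path_group WEB /var/www/\*" the group name has already been
* consumed by tomoyo_get_group() above, leaving "/var/www/\*" as the
* member to record; number_group members are a value or range and
* address_group members are an IP address or range.
*/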
if (type == TOMOYO_PATH_GROUP) {
struct tomoyo_path_group e = { };
e.member_name = tomoyo_get_name(tomoyo_read_token(param));
if (!e.member_name) {
error = -ENOMEM;
goto out;
}
error = tomoyo_update_policy(&e.head, sizeof(e), param,
tomoyo_same_path_group);
tomoyo_put_name(e.member_name);
} else if (type == TOMOYO_NUMBER_GROUP) {
struct tomoyo_number_group e = { };
if (param->data[0] == '@' ||
!tomoyo_parse_number_union(param, &e.number))
goto out;
error = tomoyo_update_policy(&e.head, sizeof(e), param,
tomoyo_same_number_group);
/*
* tomoyo_put_number_union() is not needed because
* param->data[0] != '@'.
*/
} else {
struct tomoyo_address_group e = { };
if (param->data[0] == '@' ||
!tomoyo_parse_ipaddr_union(param, &e.address))
goto out;
error = tomoyo_update_policy(&e.head, sizeof(e), param,
tomoyo_same_address_group);
}
out:
tomoyo_put_group(group);
return error;
}
/**
* tomoyo_path_matches_group - Check whether the given pathname matches members of the given pathname group.
*
* @pathname: The name of pathname.
* @group: Pointer to "struct tomoyo_path_group".
*
* Returns matched member's pathname if @pathname matches pathnames in @group,
* NULL otherwise.
*
* Caller holds tomoyo_read_lock().
*/
const struct tomoyo_path_info *
tomoyo_path_matches_group(const struct tomoyo_path_info *pathname,
const struct tomoyo_group *group)
{
struct tomoyo_path_group *member;
list_for_each_entry_rcu(member, &group->member_list, head.list,
srcu_read_lock_held(&tomoyo_ss)) {
if (member->head.is_deleted)
continue;
if (!tomoyo_path_matches_pattern(pathname, member->member_name))
continue;
return member->member_name;
}
return NULL;
}
/**
* tomoyo_number_matches_group - Check whether the given number matches members of the given number group.
*
* @min: Min number.
* @max: Max number.
* @group: Pointer to "struct tomoyo_number_group".
*
* Returns true if @min and @max partially overlaps @group, false otherwise.
*
* Caller holds tomoyo_read_lock().
*/
bool tomoyo_number_matches_group(const unsigned long min,
const unsigned long max,
const struct tomoyo_group *group)
{
struct tomoyo_number_group *member;
bool matched = false;
list_for_each_entry_rcu(member, &group->member_list, head.list,
srcu_read_lock_held(&tomoyo_ss)) {
if (member->head.is_deleted)
continue;
if (min > member->number.values[1] ||
max < member->number.values[0])
continue;
matched = true;
break;
}
return matched;
}
/**
* tomoyo_address_matches_group - Check whether the given address matches members of the given address group.
*
* @is_ipv6: True if @address is an IPv6 address.
* @address: An IPv4 or IPv6 address.
* @group: Pointer to "struct tomoyo_address_group".
*
* Returns true if @address matches addresses in @group group, false otherwise.
*
* Caller holds tomoyo_read_lock().
*/
bool tomoyo_address_matches_group(const bool is_ipv6, const __be32 *address,
const struct tomoyo_group *group)
{
struct tomoyo_address_group *member;
bool matched = false;
const u8 size = is_ipv6 ? 16 : 4;
list_for_each_entry_rcu(member, &group->member_list, head.list,
srcu_read_lock_held(&tomoyo_ss)) {
if (member->head.is_deleted)
continue;
if (member->address.is_ipv6 != is_ipv6)
continue;
if (memcmp(&member->address.ip[0], address, size) > 0 ||
memcmp(address, &member->address.ip[1], size) > 0)
continue;
matched = true;
break;
}
return matched;
}
| linux-master | security/tomoyo/group.c |
// SPDX-License-Identifier: GPL-2.0
/*
* security/tomoyo/network.c
*
* Copyright (C) 2005-2011 NTT DATA CORPORATION
*/
#include "common.h"
#include <linux/slab.h>
/* Structure for holding inet domain socket's address. */
struct tomoyo_inet_addr_info {
__be16 port; /* In network byte order. */
const __be32 *address; /* In network byte order. */
bool is_ipv6;
};
/* Structure for holding unix domain socket's address. */
struct tomoyo_unix_addr_info {
u8 *addr; /* This may not be a '\0'-terminated string. */
unsigned int addr_len;
};
/* Structure for holding socket address. */
struct tomoyo_addr_info {
u8 protocol;
u8 operation;
struct tomoyo_inet_addr_info inet;
struct tomoyo_unix_addr_info unix0;
};
/* String table for socket's protocols. */
const char * const tomoyo_proto_keyword[TOMOYO_SOCK_MAX] = {
[SOCK_STREAM] = "stream",
[SOCK_DGRAM] = "dgram",
[SOCK_RAW] = "raw",
[SOCK_SEQPACKET] = "seqpacket",
[0] = " ", /* Dummy for avoiding NULL pointer dereference. */
[4] = " ", /* Dummy for avoiding NULL pointer dereference. */
};
/**
* tomoyo_parse_ipaddr_union - Parse an IP address.
*
* @param: Pointer to "struct tomoyo_acl_param".
* @ptr: Pointer to "struct tomoyo_ipaddr_union".
*
* Returns true on success, false otherwise.
*/
bool tomoyo_parse_ipaddr_union(struct tomoyo_acl_param *param,
struct tomoyo_ipaddr_union *ptr)
{
u8 * const min = ptr->ip[0].in6_u.u6_addr8;
u8 * const max = ptr->ip[1].in6_u.u6_addr8;
char *address = tomoyo_read_token(param);
const char *end;
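/*
* Accepted forms (sketch): a single address such as "10.0.0.1" or
* "::1", or an inclusive range such as "10.0.0.1-10.0.0.255" written
* with a '-' separator; IPv4 parsing is tried first when the string
* contains no ':'.
*/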
if (!strchr(address, ':') &&
in4_pton(address, -1, min, '-', &end) > 0) {
ptr->is_ipv6 = false;
if (!*end)
ptr->ip[1].s6_addr32[0] = ptr->ip[0].s6_addr32[0];
else if (*end++ != '-' ||
in4_pton(end, -1, max, '\0', &end) <= 0 || *end)
return false;
return true;
}
if (in6_pton(address, -1, min, '-', &end) > 0) {
ptr->is_ipv6 = true;
if (!*end)
memmove(max, min, sizeof(u16) * 8);
else if (*end++ != '-' ||
in6_pton(end, -1, max, '\0', &end) <= 0 || *end)
return false;
return true;
}
return false;
}
/**
* tomoyo_print_ipv4 - Print an IPv4 address.
*
* @buffer: Buffer to write to.
* @buffer_len: Size of @buffer.
* @min_ip: Pointer to __be32.
* @max_ip: Pointer to __be32.
*
* Returns nothing.
*/
static void tomoyo_print_ipv4(char *buffer, const unsigned int buffer_len,
const __be32 *min_ip, const __be32 *max_ip)
{
snprintf(buffer, buffer_len, "%pI4%c%pI4", min_ip,
*min_ip == *max_ip ? '\0' : '-', max_ip);
}
/**
* tomoyo_print_ipv6 - Print an IPv6 address.
*
* @buffer: Buffer to write to.
* @buffer_len: Size of @buffer.
* @min_ip: Pointer to "struct in6_addr".
* @max_ip: Pointer to "struct in6_addr".
*
* Returns nothing.
*/
static void tomoyo_print_ipv6(char *buffer, const unsigned int buffer_len,
const struct in6_addr *min_ip,
const struct in6_addr *max_ip)
{
snprintf(buffer, buffer_len, "%pI6c%c%pI6c", min_ip,
!memcmp(min_ip, max_ip, 16) ? '\0' : '-', max_ip);
}
/**
* tomoyo_print_ip - Print an IP address.
*
* @buf: Buffer to write to.
* @size: Size of @buf.
* @ptr: Pointer to "struct ipaddr_union".
*
* Returns nothing.
*/
void tomoyo_print_ip(char *buf, const unsigned int size,
const struct tomoyo_ipaddr_union *ptr)
{
if (ptr->is_ipv6)
tomoyo_print_ipv6(buf, size, &ptr->ip[0], &ptr->ip[1]);
else
tomoyo_print_ipv4(buf, size, &ptr->ip[0].s6_addr32[0],
&ptr->ip[1].s6_addr32[0]);
}
/*
* Mapping table from "enum tomoyo_network_acl_index" to
* "enum tomoyo_mac_index" for inet domain socket.
*/
static const u8 tomoyo_inet2mac
[TOMOYO_SOCK_MAX][TOMOYO_MAX_NETWORK_OPERATION] = {
[SOCK_STREAM] = {
[TOMOYO_NETWORK_BIND] = TOMOYO_MAC_NETWORK_INET_STREAM_BIND,
[TOMOYO_NETWORK_LISTEN] =
TOMOYO_MAC_NETWORK_INET_STREAM_LISTEN,
[TOMOYO_NETWORK_CONNECT] =
TOMOYO_MAC_NETWORK_INET_STREAM_CONNECT,
},
[SOCK_DGRAM] = {
[TOMOYO_NETWORK_BIND] = TOMOYO_MAC_NETWORK_INET_DGRAM_BIND,
[TOMOYO_NETWORK_SEND] = TOMOYO_MAC_NETWORK_INET_DGRAM_SEND,
},
[SOCK_RAW] = {
[TOMOYO_NETWORK_BIND] = TOMOYO_MAC_NETWORK_INET_RAW_BIND,
[TOMOYO_NETWORK_SEND] = TOMOYO_MAC_NETWORK_INET_RAW_SEND,
},
};
/*
* Mapping table from "enum tomoyo_network_acl_index" to
* "enum tomoyo_mac_index" for unix domain socket.
*/
static const u8 tomoyo_unix2mac
[TOMOYO_SOCK_MAX][TOMOYO_MAX_NETWORK_OPERATION] = {
[SOCK_STREAM] = {
[TOMOYO_NETWORK_BIND] = TOMOYO_MAC_NETWORK_UNIX_STREAM_BIND,
[TOMOYO_NETWORK_LISTEN] =
TOMOYO_MAC_NETWORK_UNIX_STREAM_LISTEN,
[TOMOYO_NETWORK_CONNECT] =
TOMOYO_MAC_NETWORK_UNIX_STREAM_CONNECT,
},
[SOCK_DGRAM] = {
[TOMOYO_NETWORK_BIND] = TOMOYO_MAC_NETWORK_UNIX_DGRAM_BIND,
[TOMOYO_NETWORK_SEND] = TOMOYO_MAC_NETWORK_UNIX_DGRAM_SEND,
},
[SOCK_SEQPACKET] = {
[TOMOYO_NETWORK_BIND] =
TOMOYO_MAC_NETWORK_UNIX_SEQPACKET_BIND,
[TOMOYO_NETWORK_LISTEN] =
TOMOYO_MAC_NETWORK_UNIX_SEQPACKET_LISTEN,
[TOMOYO_NETWORK_CONNECT] =
TOMOYO_MAC_NETWORK_UNIX_SEQPACKET_CONNECT,
},
};
/**
* tomoyo_same_inet_acl - Check for duplicated "struct tomoyo_inet_acl" entry.
*
* @a: Pointer to "struct tomoyo_acl_info".
* @b: Pointer to "struct tomoyo_acl_info".
*
* Returns true if @a == @b except permission bits, false otherwise.
*/
static bool tomoyo_same_inet_acl(const struct tomoyo_acl_info *a,
const struct tomoyo_acl_info *b)
{
const struct tomoyo_inet_acl *p1 = container_of(a, typeof(*p1), head);
const struct tomoyo_inet_acl *p2 = container_of(b, typeof(*p2), head);
return p1->protocol == p2->protocol &&
tomoyo_same_ipaddr_union(&p1->address, &p2->address) &&
tomoyo_same_number_union(&p1->port, &p2->port);
}
/**
* tomoyo_same_unix_acl - Check for duplicated "struct tomoyo_unix_acl" entry.
*
* @a: Pointer to "struct tomoyo_acl_info".
* @b: Pointer to "struct tomoyo_acl_info".
*
* Returns true if @a == @b except permission bits, false otherwise.
*/
static bool tomoyo_same_unix_acl(const struct tomoyo_acl_info *a,
const struct tomoyo_acl_info *b)
{
const struct tomoyo_unix_acl *p1 = container_of(a, typeof(*p1), head);
const struct tomoyo_unix_acl *p2 = container_of(b, typeof(*p2), head);
return p1->protocol == p2->protocol &&
tomoyo_same_name_union(&p1->name, &p2->name);
}
/**
* tomoyo_merge_inet_acl - Merge duplicated "struct tomoyo_inet_acl" entry.
*
* @a: Pointer to "struct tomoyo_acl_info".
* @b: Pointer to "struct tomoyo_acl_info".
* @is_delete: True for @a &= ~@b, false for @a |= @b.
*
* Returns true if @a is empty, false otherwise.
*/
static bool tomoyo_merge_inet_acl(struct tomoyo_acl_info *a,
struct tomoyo_acl_info *b,
const bool is_delete)
{
u8 * const a_perm =
&container_of(a, struct tomoyo_inet_acl, head)->perm;
u8 perm = READ_ONCE(*a_perm);
const u8 b_perm = container_of(b, struct tomoyo_inet_acl, head)->perm;
if (is_delete)
perm &= ~b_perm;
else
perm |= b_perm;
WRITE_ONCE(*a_perm, perm);
return !perm;
}
/**
* tomoyo_merge_unix_acl - Merge duplicated "struct tomoyo_unix_acl" entry.
*
* @a: Pointer to "struct tomoyo_acl_info".
* @b: Pointer to "struct tomoyo_acl_info".
* @is_delete: True for @a &= ~@b, false for @a |= @b.
*
* Returns true if @a is empty, false otherwise.
*/
static bool tomoyo_merge_unix_acl(struct tomoyo_acl_info *a,
struct tomoyo_acl_info *b,
const bool is_delete)
{
u8 * const a_perm =
&container_of(a, struct tomoyo_unix_acl, head)->perm;
u8 perm = READ_ONCE(*a_perm);
const u8 b_perm = container_of(b, struct tomoyo_unix_acl, head)->perm;
if (is_delete)
perm &= ~b_perm;
else
perm |= b_perm;
WRITE_ONCE(*a_perm, perm);
return !perm;
}
/**
* tomoyo_write_inet_network - Write "struct tomoyo_inet_acl" list.
*
* @param: Pointer to "struct tomoyo_acl_param".
*
* Returns 0 on success, negative value otherwise.
*
* Caller holds tomoyo_read_lock().
*/
int tomoyo_write_inet_network(struct tomoyo_acl_param *param)
{
struct tomoyo_inet_acl e = { .head.type = TOMOYO_TYPE_INET_ACL };
int error = -EINVAL;
u8 type;
const char *protocol = tomoyo_read_token(param);
const char *operation = tomoyo_read_token(param);
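/*
* Expected token order (sketch): e.g. "stream bind
* 10.0.0.0-10.255.255.255 1024-65535" is read as protocol,
* '/'-separated operation list, address (or "@group"), then port
* number or range.
*/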
for (e.protocol = 0; e.protocol < TOMOYO_SOCK_MAX; e.protocol++)
if (!strcmp(protocol, tomoyo_proto_keyword[e.protocol]))
break;
for (type = 0; type < TOMOYO_MAX_NETWORK_OPERATION; type++)
if (tomoyo_permstr(operation, tomoyo_socket_keyword[type]))
e.perm |= 1 << type;
if (e.protocol == TOMOYO_SOCK_MAX || !e.perm)
return -EINVAL;
if (param->data[0] == '@') {
param->data++;
e.address.group =
tomoyo_get_group(param, TOMOYO_ADDRESS_GROUP);
if (!e.address.group)
return -ENOMEM;
} else {
if (!tomoyo_parse_ipaddr_union(param, &e.address))
goto out;
}
if (!tomoyo_parse_number_union(param, &e.port) ||
e.port.values[1] > 65535)
goto out;
error = tomoyo_update_domain(&e.head, sizeof(e), param,
tomoyo_same_inet_acl,
tomoyo_merge_inet_acl);
out:
tomoyo_put_group(e.address.group);
tomoyo_put_number_union(&e.port);
return error;
}
/**
* tomoyo_write_unix_network - Write "struct tomoyo_unix_acl" list.
*
* @param: Pointer to "struct tomoyo_acl_param".
*
* Returns 0 on success, negative value otherwise.
*/
int tomoyo_write_unix_network(struct tomoyo_acl_param *param)
{
struct tomoyo_unix_acl e = { .head.type = TOMOYO_TYPE_UNIX_ACL };
int error;
u8 type;
const char *protocol = tomoyo_read_token(param);
const char *operation = tomoyo_read_token(param);
for (e.protocol = 0; e.protocol < TOMOYO_SOCK_MAX; e.protocol++)
if (!strcmp(protocol, tomoyo_proto_keyword[e.protocol]))
break;
for (type = 0; type < TOMOYO_MAX_NETWORK_OPERATION; type++)
if (tomoyo_permstr(operation, tomoyo_socket_keyword[type]))
e.perm |= 1 << type;
if (e.protocol == TOMOYO_SOCK_MAX || !e.perm)
return -EINVAL;
if (!tomoyo_parse_name_union(param, &e.name))
return -EINVAL;
error = tomoyo_update_domain(&e.head, sizeof(e), param,
tomoyo_same_unix_acl,
tomoyo_merge_unix_acl);
tomoyo_put_name_union(&e.name);
return error;
}
/**
* tomoyo_audit_net_log - Audit network log.
*
* @r: Pointer to "struct tomoyo_request_info".
* @family: Name of socket family ("inet" or "unix").
* @protocol: Name of protocol in @family.
* @operation: Name of socket operation.
* @address: Name of address.
*
* Returns 0 on success, negative value otherwise.
*/
static int tomoyo_audit_net_log(struct tomoyo_request_info *r,
const char *family, const u8 protocol,
const u8 operation, const char *address)
{
return tomoyo_supervisor(r, "network %s %s %s %s\n", family,
tomoyo_proto_keyword[protocol],
tomoyo_socket_keyword[operation], address);
}
/**
* tomoyo_audit_inet_log - Audit INET network log.
*
* @r: Pointer to "struct tomoyo_request_info".
*
* Returns 0 on success, negative value otherwise.
*/
static int tomoyo_audit_inet_log(struct tomoyo_request_info *r)
{
char buf[128];
int len;
const __be32 *address = r->param.inet_network.address;
if (r->param.inet_network.is_ipv6)
tomoyo_print_ipv6(buf, sizeof(buf), (const struct in6_addr *)
address, (const struct in6_addr *) address);
else
tomoyo_print_ipv4(buf, sizeof(buf), address, address);
len = strlen(buf);
snprintf(buf + len, sizeof(buf) - len, " %u",
r->param.inet_network.port);
return tomoyo_audit_net_log(r, "inet", r->param.inet_network.protocol,
r->param.inet_network.operation, buf);
}
/**
* tomoyo_audit_unix_log - Audit UNIX network log.
*
* @r: Pointer to "struct tomoyo_request_info".
*
* Returns 0 on success, negative value otherwise.
*/
static int tomoyo_audit_unix_log(struct tomoyo_request_info *r)
{
return tomoyo_audit_net_log(r, "unix", r->param.unix_network.protocol,
r->param.unix_network.operation,
r->param.unix_network.address->name);
}
/**
* tomoyo_check_inet_acl - Check permission for inet domain socket operation.
*
* @r: Pointer to "struct tomoyo_request_info".
* @ptr: Pointer to "struct tomoyo_acl_info".
*
* Returns true if granted, false otherwise.
*/
static bool tomoyo_check_inet_acl(struct tomoyo_request_info *r,
const struct tomoyo_acl_info *ptr)
{
const struct tomoyo_inet_acl *acl =
container_of(ptr, typeof(*acl), head);
const u8 size = r->param.inet_network.is_ipv6 ? 16 : 4;
if (!(acl->perm & (1 << r->param.inet_network.operation)) ||
!tomoyo_compare_number_union(r->param.inet_network.port,
&acl->port))
return false;
if (acl->address.group)
return tomoyo_address_matches_group
(r->param.inet_network.is_ipv6,
r->param.inet_network.address, acl->address.group);
return acl->address.is_ipv6 == r->param.inet_network.is_ipv6 &&
memcmp(&acl->address.ip[0],
r->param.inet_network.address, size) <= 0 &&
memcmp(r->param.inet_network.address,
&acl->address.ip[1], size) <= 0;
}
/**
* tomoyo_check_unix_acl - Check permission for unix domain socket operation.
*
* @r: Pointer to "struct tomoyo_request_info".
* @ptr: Pointer to "struct tomoyo_acl_info".
*
* Returns true if granted, false otherwise.
*/
static bool tomoyo_check_unix_acl(struct tomoyo_request_info *r,
const struct tomoyo_acl_info *ptr)
{
const struct tomoyo_unix_acl *acl =
container_of(ptr, typeof(*acl), head);
return (acl->perm & (1 << r->param.unix_network.operation)) &&
tomoyo_compare_name_union(r->param.unix_network.address,
&acl->name);
}
/**
* tomoyo_inet_entry - Check permission for INET network operation.
*
* @address: Pointer to "struct tomoyo_addr_info".
*
* Returns 0 on success, negative value otherwise.
*/
static int tomoyo_inet_entry(const struct tomoyo_addr_info *address)
{
const int idx = tomoyo_read_lock();
struct tomoyo_request_info r;
int error = 0;
const u8 type = tomoyo_inet2mac[address->protocol][address->operation];
if (type && tomoyo_init_request_info(&r, NULL, type)
!= TOMOYO_CONFIG_DISABLED) {
r.param_type = TOMOYO_TYPE_INET_ACL;
r.param.inet_network.protocol = address->protocol;
r.param.inet_network.operation = address->operation;
r.param.inet_network.is_ipv6 = address->inet.is_ipv6;
r.param.inet_network.address = address->inet.address;
r.param.inet_network.port = ntohs(address->inet.port);
do {
tomoyo_check_acl(&r, tomoyo_check_inet_acl);
error = tomoyo_audit_inet_log(&r);
} while (error == TOMOYO_RETRY_REQUEST);
}
tomoyo_read_unlock(idx);
return error;
}
/**
* tomoyo_check_inet_address - Check permission for inet domain socket's operation.
*
* @addr: Pointer to "struct sockaddr".
* @addr_len: Size of @addr.
* @port: Port number.
* @address: Pointer to "struct tomoyo_addr_info".
*
* Returns 0 on success, negative value otherwise.
*/
static int tomoyo_check_inet_address(const struct sockaddr *addr,
const unsigned int addr_len,
const u16 port,
struct tomoyo_addr_info *address)
{
struct tomoyo_inet_addr_info *i = &address->inet;
if (addr_len < offsetofend(struct sockaddr, sa_family))
return 0;
switch (addr->sa_family) {
case AF_INET6:
if (addr_len < SIN6_LEN_RFC2133)
goto skip;
i->is_ipv6 = true;
i->address = (__be32 *)
((struct sockaddr_in6 *) addr)->sin6_addr.s6_addr;
i->port = ((struct sockaddr_in6 *) addr)->sin6_port;
break;
case AF_INET:
if (addr_len < sizeof(struct sockaddr_in))
goto skip;
i->is_ipv6 = false;
i->address = (__be32 *)
&((struct sockaddr_in *) addr)->sin_addr;
i->port = ((struct sockaddr_in *) addr)->sin_port;
break;
default:
goto skip;
}
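/*
* For SOCK_RAW sockets the caller passes the IP protocol number,
* which is checked in place of a port.
*/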
if (address->protocol == SOCK_RAW)
i->port = htons(port);
return tomoyo_inet_entry(address);
skip:
return 0;
}
/**
* tomoyo_unix_entry - Check permission for UNIX network operation.
*
* @address: Pointer to "struct tomoyo_addr_info".
*
* Returns 0 on success, negative value otherwise.
*/
static int tomoyo_unix_entry(const struct tomoyo_addr_info *address)
{
const int idx = tomoyo_read_lock();
struct tomoyo_request_info r;
int error = 0;
const u8 type = tomoyo_unix2mac[address->protocol][address->operation];
if (type && tomoyo_init_request_info(&r, NULL, type)
!= TOMOYO_CONFIG_DISABLED) {
char *buf = address->unix0.addr;
int len = address->unix0.addr_len - sizeof(sa_family_t);
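/*
* An unbound socket is audited as "anonymous"; an abstract address
* (leading '\0') keeps its full length, while a pathname address is
* truncated at the first '\0'.
*/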
if (len <= 0) {
buf = "anonymous";
len = 9;
} else if (buf[0]) {
len = strnlen(buf, len);
}
buf = tomoyo_encode2(buf, len);
if (buf) {
struct tomoyo_path_info addr;
addr.name = buf;
tomoyo_fill_path_info(&addr);
r.param_type = TOMOYO_TYPE_UNIX_ACL;
r.param.unix_network.protocol = address->protocol;
r.param.unix_network.operation = address->operation;
r.param.unix_network.address = &addr;
do {
tomoyo_check_acl(&r, tomoyo_check_unix_acl);
error = tomoyo_audit_unix_log(&r);
} while (error == TOMOYO_RETRY_REQUEST);
kfree(buf);
} else
error = -ENOMEM;
}
tomoyo_read_unlock(idx);
return error;
}
/**
* tomoyo_check_unix_address - Check permission for unix domain socket's operation.
*
* @addr: Pointer to "struct sockaddr".
* @addr_len: Size of @addr.
* @address: Pointer to "struct tomoyo_addr_info".
*
* Returns 0 on success, negative value otherwise.
*/
static int tomoyo_check_unix_address(struct sockaddr *addr,
const unsigned int addr_len,
struct tomoyo_addr_info *address)
{
struct tomoyo_unix_addr_info *u = &address->unix0;
if (addr_len < offsetofend(struct sockaddr, sa_family))
return 0;
if (addr->sa_family != AF_UNIX)
return 0;
u->addr = ((struct sockaddr_un *) addr)->sun_path;
u->addr_len = addr_len;
return tomoyo_unix_entry(address);
}
/**
* tomoyo_kernel_service - Check whether I'm kernel service or not.
*
* Returns true if I'm kernel service, false otherwise.
*/
static bool tomoyo_kernel_service(void)
{
/* Nothing to do if I am a kernel service. */
return current->flags & PF_KTHREAD;
}
/**
* tomoyo_sock_family - Get socket's family.
*
* @sk: Pointer to "struct sock".
*
* Returns one of PF_INET, PF_INET6, PF_UNIX or 0.
*/
static u8 tomoyo_sock_family(struct sock *sk)
{
u8 family;
if (tomoyo_kernel_service())
return 0;
family = sk->sk_family;
switch (family) {
case PF_INET:
case PF_INET6:
case PF_UNIX:
return family;
default:
return 0;
}
}
/**
* tomoyo_socket_listen_permission - Check permission for listening a socket.
*
* @sock: Pointer to "struct socket".
*
* Returns 0 on success, negative value otherwise.
*/
int tomoyo_socket_listen_permission(struct socket *sock)
{
struct tomoyo_addr_info address;
const u8 family = tomoyo_sock_family(sock->sk);
const unsigned int type = sock->type;
struct sockaddr_storage addr;
int addr_len;
if (!family || (type != SOCK_STREAM && type != SOCK_SEQPACKET))
return 0;
{
const int error = sock->ops->getname(sock, (struct sockaddr *)
&addr, 0);
if (error < 0)
return error;
addr_len = error;
}
address.protocol = type;
address.operation = TOMOYO_NETWORK_LISTEN;
if (family == PF_UNIX)
return tomoyo_check_unix_address((struct sockaddr *) &addr,
addr_len, &address);
return tomoyo_check_inet_address((struct sockaddr *) &addr, addr_len,
0, &address);
}
/**
* tomoyo_socket_connect_permission - Check permission for setting the remote address of a socket.
*
* @sock: Pointer to "struct socket".
* @addr: Pointer to "struct sockaddr".
* @addr_len: Size of @addr.
*
* Returns 0 on success, negative value otherwise.
*/
int tomoyo_socket_connect_permission(struct socket *sock,
struct sockaddr *addr, int addr_len)
{
struct tomoyo_addr_info address;
const u8 family = tomoyo_sock_family(sock->sk);
const unsigned int type = sock->type;
if (!family)
return 0;
address.protocol = type;
switch (type) {
case SOCK_DGRAM:
case SOCK_RAW:
address.operation = TOMOYO_NETWORK_SEND;
break;
case SOCK_STREAM:
case SOCK_SEQPACKET:
address.operation = TOMOYO_NETWORK_CONNECT;
break;
default:
return 0;
}
if (family == PF_UNIX)
return tomoyo_check_unix_address(addr, addr_len, &address);
return tomoyo_check_inet_address(addr, addr_len, sock->sk->sk_protocol,
&address);
}
/**
* tomoyo_socket_bind_permission - Check permission for setting the local address of a socket.
*
* @sock: Pointer to "struct socket".
* @addr: Pointer to "struct sockaddr".
* @addr_len: Size of @addr.
*
* Returns 0 on success, negative value otherwise.
*/
int tomoyo_socket_bind_permission(struct socket *sock, struct sockaddr *addr,
int addr_len)
{
struct tomoyo_addr_info address;
const u8 family = tomoyo_sock_family(sock->sk);
const unsigned int type = sock->type;
if (!family)
return 0;
switch (type) {
case SOCK_STREAM:
case SOCK_DGRAM:
case SOCK_RAW:
case SOCK_SEQPACKET:
address.protocol = type;
address.operation = TOMOYO_NETWORK_BIND;
break;
default:
return 0;
}
if (family == PF_UNIX)
return tomoyo_check_unix_address(addr, addr_len, &address);
return tomoyo_check_inet_address(addr, addr_len, sock->sk->sk_protocol,
&address);
}
/**
* tomoyo_socket_sendmsg_permission - Check permission for sending a datagram.
*
* @sock: Pointer to "struct socket".
* @msg: Pointer to "struct msghdr".
* @size: Unused.
*
* Returns 0 on success, negative value otherwise.
*/
int tomoyo_socket_sendmsg_permission(struct socket *sock, struct msghdr *msg,
int size)
{
struct tomoyo_addr_info address;
const u8 family = tomoyo_sock_family(sock->sk);
const unsigned int type = sock->type;
if (!msg->msg_name || !family ||
(type != SOCK_DGRAM && type != SOCK_RAW))
return 0;
address.protocol = type;
address.operation = TOMOYO_NETWORK_SEND;
if (family == PF_UNIX)
return tomoyo_check_unix_address((struct sockaddr *)
msg->msg_name,
msg->msg_namelen, &address);
return tomoyo_check_inet_address((struct sockaddr *) msg->msg_name,
msg->msg_namelen,
sock->sk->sk_protocol, &address);
}
| linux-master | security/tomoyo/network.c |
// SPDX-License-Identifier: GPL-2.0
/*
* security/tomoyo/common.c
*
* Copyright (C) 2005-2011 NTT DATA CORPORATION
*/
#include <linux/uaccess.h>
#include <linux/slab.h>
#include <linux/security.h>
#include <linux/string_helpers.h>
#include "common.h"
/* String table for operation mode. */
const char * const tomoyo_mode[TOMOYO_CONFIG_MAX_MODE] = {
[TOMOYO_CONFIG_DISABLED] = "disabled",
[TOMOYO_CONFIG_LEARNING] = "learning",
[TOMOYO_CONFIG_PERMISSIVE] = "permissive",
[TOMOYO_CONFIG_ENFORCING] = "enforcing"
};
/* String table for /sys/kernel/security/tomoyo/profile */
const char * const tomoyo_mac_keywords[TOMOYO_MAX_MAC_INDEX
+ TOMOYO_MAX_MAC_CATEGORY_INDEX] = {
/* CONFIG::file group */
[TOMOYO_MAC_FILE_EXECUTE] = "execute",
[TOMOYO_MAC_FILE_OPEN] = "open",
[TOMOYO_MAC_FILE_CREATE] = "create",
[TOMOYO_MAC_FILE_UNLINK] = "unlink",
[TOMOYO_MAC_FILE_GETATTR] = "getattr",
[TOMOYO_MAC_FILE_MKDIR] = "mkdir",
[TOMOYO_MAC_FILE_RMDIR] = "rmdir",
[TOMOYO_MAC_FILE_MKFIFO] = "mkfifo",
[TOMOYO_MAC_FILE_MKSOCK] = "mksock",
[TOMOYO_MAC_FILE_TRUNCATE] = "truncate",
[TOMOYO_MAC_FILE_SYMLINK] = "symlink",
[TOMOYO_MAC_FILE_MKBLOCK] = "mkblock",
[TOMOYO_MAC_FILE_MKCHAR] = "mkchar",
[TOMOYO_MAC_FILE_LINK] = "link",
[TOMOYO_MAC_FILE_RENAME] = "rename",
[TOMOYO_MAC_FILE_CHMOD] = "chmod",
[TOMOYO_MAC_FILE_CHOWN] = "chown",
[TOMOYO_MAC_FILE_CHGRP] = "chgrp",
[TOMOYO_MAC_FILE_IOCTL] = "ioctl",
[TOMOYO_MAC_FILE_CHROOT] = "chroot",
[TOMOYO_MAC_FILE_MOUNT] = "mount",
[TOMOYO_MAC_FILE_UMOUNT] = "unmount",
[TOMOYO_MAC_FILE_PIVOT_ROOT] = "pivot_root",
/* CONFIG::network group */
[TOMOYO_MAC_NETWORK_INET_STREAM_BIND] = "inet_stream_bind",
[TOMOYO_MAC_NETWORK_INET_STREAM_LISTEN] = "inet_stream_listen",
[TOMOYO_MAC_NETWORK_INET_STREAM_CONNECT] = "inet_stream_connect",
[TOMOYO_MAC_NETWORK_INET_DGRAM_BIND] = "inet_dgram_bind",
[TOMOYO_MAC_NETWORK_INET_DGRAM_SEND] = "inet_dgram_send",
[TOMOYO_MAC_NETWORK_INET_RAW_BIND] = "inet_raw_bind",
[TOMOYO_MAC_NETWORK_INET_RAW_SEND] = "inet_raw_send",
[TOMOYO_MAC_NETWORK_UNIX_STREAM_BIND] = "unix_stream_bind",
[TOMOYO_MAC_NETWORK_UNIX_STREAM_LISTEN] = "unix_stream_listen",
[TOMOYO_MAC_NETWORK_UNIX_STREAM_CONNECT] = "unix_stream_connect",
[TOMOYO_MAC_NETWORK_UNIX_DGRAM_BIND] = "unix_dgram_bind",
[TOMOYO_MAC_NETWORK_UNIX_DGRAM_SEND] = "unix_dgram_send",
[TOMOYO_MAC_NETWORK_UNIX_SEQPACKET_BIND] = "unix_seqpacket_bind",
[TOMOYO_MAC_NETWORK_UNIX_SEQPACKET_LISTEN] = "unix_seqpacket_listen",
[TOMOYO_MAC_NETWORK_UNIX_SEQPACKET_CONNECT] = "unix_seqpacket_connect",
/* CONFIG::misc group */
[TOMOYO_MAC_ENVIRON] = "env",
/* CONFIG group */
[TOMOYO_MAX_MAC_INDEX + TOMOYO_MAC_CATEGORY_FILE] = "file",
[TOMOYO_MAX_MAC_INDEX + TOMOYO_MAC_CATEGORY_NETWORK] = "network",
[TOMOYO_MAX_MAC_INDEX + TOMOYO_MAC_CATEGORY_MISC] = "misc",
};
/* String table for conditions. */
const char * const tomoyo_condition_keyword[TOMOYO_MAX_CONDITION_KEYWORD] = {
[TOMOYO_TASK_UID] = "task.uid",
[TOMOYO_TASK_EUID] = "task.euid",
[TOMOYO_TASK_SUID] = "task.suid",
[TOMOYO_TASK_FSUID] = "task.fsuid",
[TOMOYO_TASK_GID] = "task.gid",
[TOMOYO_TASK_EGID] = "task.egid",
[TOMOYO_TASK_SGID] = "task.sgid",
[TOMOYO_TASK_FSGID] = "task.fsgid",
[TOMOYO_TASK_PID] = "task.pid",
[TOMOYO_TASK_PPID] = "task.ppid",
[TOMOYO_EXEC_ARGC] = "exec.argc",
[TOMOYO_EXEC_ENVC] = "exec.envc",
[TOMOYO_TYPE_IS_SOCKET] = "socket",
[TOMOYO_TYPE_IS_SYMLINK] = "symlink",
[TOMOYO_TYPE_IS_FILE] = "file",
[TOMOYO_TYPE_IS_BLOCK_DEV] = "block",
[TOMOYO_TYPE_IS_DIRECTORY] = "directory",
[TOMOYO_TYPE_IS_CHAR_DEV] = "char",
[TOMOYO_TYPE_IS_FIFO] = "fifo",
[TOMOYO_MODE_SETUID] = "setuid",
[TOMOYO_MODE_SETGID] = "setgid",
[TOMOYO_MODE_STICKY] = "sticky",
[TOMOYO_MODE_OWNER_READ] = "owner_read",
[TOMOYO_MODE_OWNER_WRITE] = "owner_write",
[TOMOYO_MODE_OWNER_EXECUTE] = "owner_execute",
[TOMOYO_MODE_GROUP_READ] = "group_read",
[TOMOYO_MODE_GROUP_WRITE] = "group_write",
[TOMOYO_MODE_GROUP_EXECUTE] = "group_execute",
[TOMOYO_MODE_OTHERS_READ] = "others_read",
[TOMOYO_MODE_OTHERS_WRITE] = "others_write",
[TOMOYO_MODE_OTHERS_EXECUTE] = "others_execute",
[TOMOYO_EXEC_REALPATH] = "exec.realpath",
[TOMOYO_SYMLINK_TARGET] = "symlink.target",
[TOMOYO_PATH1_UID] = "path1.uid",
[TOMOYO_PATH1_GID] = "path1.gid",
[TOMOYO_PATH1_INO] = "path1.ino",
[TOMOYO_PATH1_MAJOR] = "path1.major",
[TOMOYO_PATH1_MINOR] = "path1.minor",
[TOMOYO_PATH1_PERM] = "path1.perm",
[TOMOYO_PATH1_TYPE] = "path1.type",
[TOMOYO_PATH1_DEV_MAJOR] = "path1.dev_major",
[TOMOYO_PATH1_DEV_MINOR] = "path1.dev_minor",
[TOMOYO_PATH2_UID] = "path2.uid",
[TOMOYO_PATH2_GID] = "path2.gid",
[TOMOYO_PATH2_INO] = "path2.ino",
[TOMOYO_PATH2_MAJOR] = "path2.major",
[TOMOYO_PATH2_MINOR] = "path2.minor",
[TOMOYO_PATH2_PERM] = "path2.perm",
[TOMOYO_PATH2_TYPE] = "path2.type",
[TOMOYO_PATH2_DEV_MAJOR] = "path2.dev_major",
[TOMOYO_PATH2_DEV_MINOR] = "path2.dev_minor",
[TOMOYO_PATH1_PARENT_UID] = "path1.parent.uid",
[TOMOYO_PATH1_PARENT_GID] = "path1.parent.gid",
[TOMOYO_PATH1_PARENT_INO] = "path1.parent.ino",
[TOMOYO_PATH1_PARENT_PERM] = "path1.parent.perm",
[TOMOYO_PATH2_PARENT_UID] = "path2.parent.uid",
[TOMOYO_PATH2_PARENT_GID] = "path2.parent.gid",
[TOMOYO_PATH2_PARENT_INO] = "path2.parent.ino",
[TOMOYO_PATH2_PARENT_PERM] = "path2.parent.perm",
};
/* String table for PREFERENCE keyword. */
static const char * const tomoyo_pref_keywords[TOMOYO_MAX_PREF] = {
[TOMOYO_PREF_MAX_AUDIT_LOG] = "max_audit_log",
[TOMOYO_PREF_MAX_LEARNING_ENTRY] = "max_learning_entry",
};
/* String table for path operation. */
const char * const tomoyo_path_keyword[TOMOYO_MAX_PATH_OPERATION] = {
[TOMOYO_TYPE_EXECUTE] = "execute",
[TOMOYO_TYPE_READ] = "read",
[TOMOYO_TYPE_WRITE] = "write",
[TOMOYO_TYPE_APPEND] = "append",
[TOMOYO_TYPE_UNLINK] = "unlink",
[TOMOYO_TYPE_GETATTR] = "getattr",
[TOMOYO_TYPE_RMDIR] = "rmdir",
[TOMOYO_TYPE_TRUNCATE] = "truncate",
[TOMOYO_TYPE_SYMLINK] = "symlink",
[TOMOYO_TYPE_CHROOT] = "chroot",
[TOMOYO_TYPE_UMOUNT] = "unmount",
};
/* String table for socket's operation. */
const char * const tomoyo_socket_keyword[TOMOYO_MAX_NETWORK_OPERATION] = {
[TOMOYO_NETWORK_BIND] = "bind",
[TOMOYO_NETWORK_LISTEN] = "listen",
[TOMOYO_NETWORK_CONNECT] = "connect",
[TOMOYO_NETWORK_SEND] = "send",
};
/* String table for categories. */
static const char * const tomoyo_category_keywords
[TOMOYO_MAX_MAC_CATEGORY_INDEX] = {
[TOMOYO_MAC_CATEGORY_FILE] = "file",
[TOMOYO_MAC_CATEGORY_NETWORK] = "network",
[TOMOYO_MAC_CATEGORY_MISC] = "misc",
};
/* Permit policy management by non-root user? */
static bool tomoyo_manage_by_non_root;
/* Utility functions. */
/**
* tomoyo_addprintf - strncat()-like-snprintf().
*
* @buffer: Buffer to write to. Must be '\0'-terminated.
* @len: Size of @buffer.
* @fmt: The printf()'s format string, followed by parameters.
*
* Returns nothing.
*/
__printf(3, 4)
static void tomoyo_addprintf(char *buffer, int len, const char *fmt, ...)
{
va_list args;
const int pos = strlen(buffer);
va_start(args, fmt);
vsnprintf(buffer + pos, len - pos - 1, fmt, args);
va_end(args);
}
/**
* tomoyo_flush - Flush queued string to userspace's buffer.
*
* @head: Pointer to "struct tomoyo_io_buffer".
*
* Returns true if all data was flushed, false otherwise.
*/
static bool tomoyo_flush(struct tomoyo_io_buffer *head)
{
while (head->r.w_pos) {
const char *w = head->r.w[0];
size_t len = strlen(w);
if (len) {
if (len > head->read_user_buf_avail)
len = head->read_user_buf_avail;
if (!len)
return false;
if (copy_to_user(head->read_user_buf, w, len))
return false;
head->read_user_buf_avail -= len;
head->read_user_buf += len;
w += len;
}
head->r.w[0] = w;
if (*w)
return false;
/* Add '\0' for audit logs and query. */
if (head->poll) {
if (!head->read_user_buf_avail ||
copy_to_user(head->read_user_buf, "", 1))
return false;
head->read_user_buf_avail--;
head->read_user_buf++;
}
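/* Entry fully written; drop it and shift the remaining queue down. */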
head->r.w_pos--;
for (len = 0; len < head->r.w_pos; len++)
head->r.w[len] = head->r.w[len + 1];
}
head->r.avail = 0;
return true;
}
/**
* tomoyo_set_string - Queue string to "struct tomoyo_io_buffer" structure.
*
* @head: Pointer to "struct tomoyo_io_buffer".
* @string: String to print.
*
* Note that @string has to be kept valid until @head is kfree()d.
* This means that char[] allocated on stack memory cannot be passed to
* this function. Use tomoyo_io_printf() for char[] allocated on stack memory.
*/
static void tomoyo_set_string(struct tomoyo_io_buffer *head, const char *string)
{
if (head->r.w_pos < TOMOYO_MAX_IO_READ_QUEUE) {
head->r.w[head->r.w_pos++] = string;
tomoyo_flush(head);
} else
WARN_ON(1);
}
static void tomoyo_io_printf(struct tomoyo_io_buffer *head, const char *fmt,
...) __printf(2, 3);
/**
* tomoyo_io_printf - printf() to "struct tomoyo_io_buffer" structure.
*
* @head: Pointer to "struct tomoyo_io_buffer".
* @fmt: The printf()'s format string, followed by parameters.
*/
static void tomoyo_io_printf(struct tomoyo_io_buffer *head, const char *fmt,
...)
{
va_list args;
size_t len;
size_t pos = head->r.avail;
int size = head->readbuf_size - pos;
if (size <= 0)
return;
va_start(args, fmt);
len = vsnprintf(head->read_buf + pos, size, fmt, args) + 1;
va_end(args);
if (pos + len >= head->readbuf_size) {
WARN_ON(1);
return;
}
head->r.avail += len;
tomoyo_set_string(head, head->read_buf + pos);
}
/**
* tomoyo_set_space - Put a space to "struct tomoyo_io_buffer" structure.
*
* @head: Pointer to "struct tomoyo_io_buffer".
*
* Returns nothing.
*/
static void tomoyo_set_space(struct tomoyo_io_buffer *head)
{
tomoyo_set_string(head, " ");
}
/**
* tomoyo_set_lf - Put a line feed to "struct tomoyo_io_buffer" structure.
*
* @head: Pointer to "struct tomoyo_io_buffer".
*
* Returns true if all queued data was flushed, false otherwise.
*/
static bool tomoyo_set_lf(struct tomoyo_io_buffer *head)
{
tomoyo_set_string(head, "\n");
return !head->r.w_pos;
}
/**
* tomoyo_set_slash - Put a slash to "struct tomoyo_io_buffer" structure.
*
* @head: Pointer to "struct tomoyo_io_buffer".
*
* Returns nothing.
*/
static void tomoyo_set_slash(struct tomoyo_io_buffer *head)
{
tomoyo_set_string(head, "/");
}
/* List of namespaces. */
LIST_HEAD(tomoyo_namespace_list);
/* True if namespace other than tomoyo_kernel_namespace is defined. */
static bool tomoyo_namespace_enabled;
/**
* tomoyo_init_policy_namespace - Initialize namespace.
*
* @ns: Pointer to "struct tomoyo_policy_namespace".
*
* Returns nothing.
*/
void tomoyo_init_policy_namespace(struct tomoyo_policy_namespace *ns)
{
unsigned int idx;
for (idx = 0; idx < TOMOYO_MAX_ACL_GROUPS; idx++)
INIT_LIST_HEAD(&ns->acl_group[idx]);
for (idx = 0; idx < TOMOYO_MAX_GROUP; idx++)
INIT_LIST_HEAD(&ns->group_list[idx]);
for (idx = 0; idx < TOMOYO_MAX_POLICY; idx++)
INIT_LIST_HEAD(&ns->policy_list[idx]);
ns->profile_version = 20150505;
tomoyo_namespace_enabled = !list_empty(&tomoyo_namespace_list);
list_add_tail_rcu(&ns->namespace_list, &tomoyo_namespace_list);
}
/**
* tomoyo_print_namespace - Print namespace header.
*
* @head: Pointer to "struct tomoyo_io_buffer".
*
* Returns nothing.
*/
static void tomoyo_print_namespace(struct tomoyo_io_buffer *head)
{
if (!tomoyo_namespace_enabled)
return;
tomoyo_set_string(head,
container_of(head->r.ns,
struct tomoyo_policy_namespace,
namespace_list)->name);
tomoyo_set_space(head);
}
/**
* tomoyo_print_name_union - Print a tomoyo_name_union.
*
* @head: Pointer to "struct tomoyo_io_buffer".
* @ptr: Pointer to "struct tomoyo_name_union".
*/
static void tomoyo_print_name_union(struct tomoyo_io_buffer *head,
const struct tomoyo_name_union *ptr)
{
tomoyo_set_space(head);
if (ptr->group) {
tomoyo_set_string(head, "@");
tomoyo_set_string(head, ptr->group->group_name->name);
} else {
tomoyo_set_string(head, ptr->filename->name);
}
}
/**
* tomoyo_print_name_union_quoted - Print a tomoyo_name_union with a quote.
*
* @head: Pointer to "struct tomoyo_io_buffer".
* @ptr: Pointer to "struct tomoyo_name_union".
*
* Returns nothing.
*/
static void tomoyo_print_name_union_quoted(struct tomoyo_io_buffer *head,
const struct tomoyo_name_union *ptr)
{
if (ptr->group) {
tomoyo_set_string(head, "@");
tomoyo_set_string(head, ptr->group->group_name->name);
} else {
tomoyo_set_string(head, "\"");
tomoyo_set_string(head, ptr->filename->name);
tomoyo_set_string(head, "\"");
}
}
/**
* tomoyo_print_number_union_nospace - Print a tomoyo_number_union without a space.
*
* @head: Pointer to "struct tomoyo_io_buffer".
* @ptr: Pointer to "struct tomoyo_number_union".
*
* Returns nothing.
*/
static void tomoyo_print_number_union_nospace
(struct tomoyo_io_buffer *head, const struct tomoyo_number_union *ptr)
{
if (ptr->group) {
tomoyo_set_string(head, "@");
tomoyo_set_string(head, ptr->group->group_name->name);
} else {
int i;
unsigned long min = ptr->values[0];
const unsigned long max = ptr->values[1];
u8 min_type = ptr->value_type[0];
const u8 max_type = ptr->value_type[1];
char buffer[128];
buffer[0] = '\0';
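/*
* Print the minimum value in its stored radix, then "-maximum"
* unless the range collapses to a single value.
*/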
for (i = 0; i < 2; i++) {
switch (min_type) {
case TOMOYO_VALUE_TYPE_HEXADECIMAL:
tomoyo_addprintf(buffer, sizeof(buffer),
"0x%lX", min);
break;
case TOMOYO_VALUE_TYPE_OCTAL:
tomoyo_addprintf(buffer, sizeof(buffer),
"0%lo", min);
break;
default:
tomoyo_addprintf(buffer, sizeof(buffer), "%lu",
min);
break;
}
if (min == max && min_type == max_type)
break;
tomoyo_addprintf(buffer, sizeof(buffer), "-");
min_type = max_type;
min = max;
}
tomoyo_io_printf(head, "%s", buffer);
}
}
/**
* tomoyo_print_number_union - Print a tomoyo_number_union.
*
* @head: Pointer to "struct tomoyo_io_buffer".
* @ptr: Pointer to "struct tomoyo_number_union".
*
* Returns nothing.
*/
static void tomoyo_print_number_union(struct tomoyo_io_buffer *head,
const struct tomoyo_number_union *ptr)
{
tomoyo_set_space(head);
tomoyo_print_number_union_nospace(head, ptr);
}
/**
* tomoyo_assign_profile - Create a new profile.
*
* @ns: Pointer to "struct tomoyo_policy_namespace".
* @profile: Profile number to create.
*
* Returns pointer to "struct tomoyo_profile" on success, NULL otherwise.
*/
static struct tomoyo_profile *tomoyo_assign_profile
(struct tomoyo_policy_namespace *ns, const unsigned int profile)
{
struct tomoyo_profile *ptr;
struct tomoyo_profile *entry;
if (profile >= TOMOYO_MAX_PROFILES)
return NULL;
ptr = ns->profile_ptr[profile];
if (ptr)
return ptr;
entry = kzalloc(sizeof(*entry), GFP_NOFS | __GFP_NOWARN);
if (mutex_lock_interruptible(&tomoyo_policy_lock))
goto out;
ptr = ns->profile_ptr[profile];
if (!ptr && tomoyo_memory_ok(entry)) {
ptr = entry;
ptr->default_config = TOMOYO_CONFIG_DISABLED |
TOMOYO_CONFIG_WANT_GRANT_LOG |
TOMOYO_CONFIG_WANT_REJECT_LOG;
memset(ptr->config, TOMOYO_CONFIG_USE_DEFAULT,
sizeof(ptr->config));
ptr->pref[TOMOYO_PREF_MAX_AUDIT_LOG] =
CONFIG_SECURITY_TOMOYO_MAX_AUDIT_LOG;
ptr->pref[TOMOYO_PREF_MAX_LEARNING_ENTRY] =
CONFIG_SECURITY_TOMOYO_MAX_ACCEPT_ENTRY;
mb(); /* Avoid out-of-order execution. */
ns->profile_ptr[profile] = ptr;
entry = NULL;
}
mutex_unlock(&tomoyo_policy_lock);
out:
kfree(entry);
return ptr;
}
/**
* tomoyo_profile - Find a profile.
*
* @ns: Pointer to "struct tomoyo_policy_namespace".
* @profile: Profile number to find.
*
* Returns pointer to "struct tomoyo_profile".
*/
struct tomoyo_profile *tomoyo_profile(const struct tomoyo_policy_namespace *ns,
const u8 profile)
{
static struct tomoyo_profile tomoyo_null_profile;
struct tomoyo_profile *ptr = ns->profile_ptr[profile];
if (!ptr)
ptr = &tomoyo_null_profile;
return ptr;
}
/**
* tomoyo_find_yesno - Find values for specified keyword.
*
* @string: String to check.
* @find: Name of keyword.
*
* Returns 1 if "@find=yes" was found, 0 if "@find=no" was found, -1 otherwise.
*/
static s8 tomoyo_find_yesno(const char *string, const char *find)
{
const char *cp = strstr(string, find);
if (cp) {
cp += strlen(find);
if (!strncmp(cp, "=yes", 4))
return 1;
else if (!strncmp(cp, "=no", 3))
return 0;
}
return -1;
}
/**
* tomoyo_set_uint - Set value for specified preference.
*
* @i: Pointer to "unsigned int".
* @string: String to check.
* @find: Name of keyword.
*
* Returns nothing.
*/
static void tomoyo_set_uint(unsigned int *i, const char *string,
const char *find)
{
const char *cp = strstr(string, find);
if (cp)
sscanf(cp + strlen(find), "=%u", i);
}
/**
* tomoyo_set_mode - Set mode for specified profile.
*
* @name: Name of functionality.
* @value: Mode for @name.
* @profile: Pointer to "struct tomoyo_profile".
*
* Returns 0 on success, negative value otherwise.
*/
static int tomoyo_set_mode(char *name, const char *value,
struct tomoyo_profile *profile)
{
u8 i;
u8 config;
if (!strcmp(name, "CONFIG")) {
i = TOMOYO_MAX_MAC_INDEX + TOMOYO_MAX_MAC_CATEGORY_INDEX;
config = profile->default_config;
} else if (tomoyo_str_starts(&name, "CONFIG::")) {
config = 0;
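/*
* Accept both "CONFIG::<category>::<functionality>" entries and
* plain "CONFIG::<category>" entries; category entries are indexed
* past TOMOYO_MAX_MAC_INDEX and take no "<category>::" prefix.
*/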
for (i = 0; i < TOMOYO_MAX_MAC_INDEX
+ TOMOYO_MAX_MAC_CATEGORY_INDEX; i++) {
int len = 0;
if (i < TOMOYO_MAX_MAC_INDEX) {
const u8 c = tomoyo_index2category[i];
const char *category =
tomoyo_category_keywords[c];
len = strlen(category);
if (strncmp(name, category, len) ||
name[len++] != ':' || name[len++] != ':')
continue;
}
if (strcmp(name + len, tomoyo_mac_keywords[i]))
continue;
config = profile->config[i];
break;
}
if (i == TOMOYO_MAX_MAC_INDEX + TOMOYO_MAX_MAC_CATEGORY_INDEX)
return -EINVAL;
} else {
return -EINVAL;
}
if (strstr(value, "use_default")) {
config = TOMOYO_CONFIG_USE_DEFAULT;
} else {
u8 mode;
for (mode = 0; mode < 4; mode++)
if (strstr(value, tomoyo_mode[mode]))
/*
* Update lower 3 bits in order to distinguish
* 'config' from 'TOMOYO_CONFIG_USE_DEFAULT'.
*/
config = (config & ~7) | mode;
if (config != TOMOYO_CONFIG_USE_DEFAULT) {
switch (tomoyo_find_yesno(value, "grant_log")) {
case 1:
config |= TOMOYO_CONFIG_WANT_GRANT_LOG;
break;
case 0:
config &= ~TOMOYO_CONFIG_WANT_GRANT_LOG;
break;
}
switch (tomoyo_find_yesno(value, "reject_log")) {
case 1:
config |= TOMOYO_CONFIG_WANT_REJECT_LOG;
break;
case 0:
config &= ~TOMOYO_CONFIG_WANT_REJECT_LOG;
break;
}
}
}
if (i < TOMOYO_MAX_MAC_INDEX + TOMOYO_MAX_MAC_CATEGORY_INDEX)
profile->config[i] = config;
else if (config != TOMOYO_CONFIG_USE_DEFAULT)
profile->default_config = config;
return 0;
}
/**
* tomoyo_write_profile - Write profile table.
*
* @head: Pointer to "struct tomoyo_io_buffer".
*
* Returns 0 on success, negative value otherwise.
*/
static int tomoyo_write_profile(struct tomoyo_io_buffer *head)
{
char *data = head->write_buf;
unsigned int i;
char *cp;
struct tomoyo_profile *profile;
if (sscanf(data, "PROFILE_VERSION=%u", &head->w.ns->profile_version)
== 1)
return 0;
i = simple_strtoul(data, &cp, 10);
if (*cp != '-')
return -EINVAL;
data = cp + 1;
profile = tomoyo_assign_profile(head->w.ns, i);
if (!profile)
return -EINVAL;
cp = strchr(data, '=');
if (!cp)
return -EINVAL;
*cp++ = '\0';
if (!strcmp(data, "COMMENT")) {
static DEFINE_SPINLOCK(lock);
const struct tomoyo_path_info *new_comment
= tomoyo_get_name(cp);
const struct tomoyo_path_info *old_comment;
if (!new_comment)
return -ENOMEM;
spin_lock(&lock);
old_comment = profile->comment;
profile->comment = new_comment;
spin_unlock(&lock);
tomoyo_put_name(old_comment);
return 0;
}
if (!strcmp(data, "PREFERENCE")) {
for (i = 0; i < TOMOYO_MAX_PREF; i++)
tomoyo_set_uint(&profile->pref[i], cp,
tomoyo_pref_keywords[i]);
return 0;
}
return tomoyo_set_mode(data, cp, profile);
}
/**
* tomoyo_print_config - Print mode for specified functionality.
*
* @head: Pointer to "struct tomoyo_io_buffer".
* @config: Mode for that functionality.
*
* Returns nothing.
*
* Caller prints functionality's name.
*/
static void tomoyo_print_config(struct tomoyo_io_buffer *head, const u8 config)
{
tomoyo_io_printf(head, "={ mode=%s grant_log=%s reject_log=%s }\n",
tomoyo_mode[config & 3],
str_yes_no(config & TOMOYO_CONFIG_WANT_GRANT_LOG),
str_yes_no(config & TOMOYO_CONFIG_WANT_REJECT_LOG));
}
/**
* tomoyo_read_profile - Read profile table.
*
* @head: Pointer to "struct tomoyo_io_buffer".
*
* Returns nothing.
*/
static void tomoyo_read_profile(struct tomoyo_io_buffer *head)
{
u8 index;
struct tomoyo_policy_namespace *ns =
container_of(head->r.ns, typeof(*ns), namespace_list);
const struct tomoyo_profile *profile;
if (head->r.eof)
return;
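/*
* r.step walks each profile: 0 prints the PROFILE_VERSION header,
* 1 finds the next assigned profile slot, 2 prints the COMMENT and
* PREFERENCE lines, 3 prints the default CONFIG line, 4 prints every
* non-default CONFIG::... entry, then loops back to step 1.
*/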
next:
index = head->r.index;
profile = ns->profile_ptr[index];
switch (head->r.step) {
case 0:
tomoyo_print_namespace(head);
tomoyo_io_printf(head, "PROFILE_VERSION=%u\n",
ns->profile_version);
head->r.step++;
break;
case 1:
for ( ; head->r.index < TOMOYO_MAX_PROFILES;
head->r.index++)
if (ns->profile_ptr[head->r.index])
break;
if (head->r.index == TOMOYO_MAX_PROFILES) {
head->r.eof = true;
return;
}
head->r.step++;
break;
case 2:
{
u8 i;
const struct tomoyo_path_info *comment =
profile->comment;
tomoyo_print_namespace(head);
tomoyo_io_printf(head, "%u-COMMENT=", index);
tomoyo_set_string(head, comment ? comment->name : "");
tomoyo_set_lf(head);
tomoyo_print_namespace(head);
tomoyo_io_printf(head, "%u-PREFERENCE={ ", index);
for (i = 0; i < TOMOYO_MAX_PREF; i++)
tomoyo_io_printf(head, "%s=%u ",
tomoyo_pref_keywords[i],
profile->pref[i]);
tomoyo_set_string(head, "}\n");
head->r.step++;
}
break;
case 3:
{
tomoyo_print_namespace(head);
tomoyo_io_printf(head, "%u-%s", index, "CONFIG");
tomoyo_print_config(head, profile->default_config);
head->r.bit = 0;
head->r.step++;
}
break;
case 4:
for ( ; head->r.bit < TOMOYO_MAX_MAC_INDEX
+ TOMOYO_MAX_MAC_CATEGORY_INDEX; head->r.bit++) {
const u8 i = head->r.bit;
const u8 config = profile->config[i];
if (config == TOMOYO_CONFIG_USE_DEFAULT)
continue;
tomoyo_print_namespace(head);
if (i < TOMOYO_MAX_MAC_INDEX)
tomoyo_io_printf(head, "%u-CONFIG::%s::%s",
index,
tomoyo_category_keywords
[tomoyo_index2category[i]],
tomoyo_mac_keywords[i]);
else
tomoyo_io_printf(head, "%u-CONFIG::%s", index,
tomoyo_mac_keywords[i]);
tomoyo_print_config(head, config);
head->r.bit++;
break;
}
if (head->r.bit == TOMOYO_MAX_MAC_INDEX
+ TOMOYO_MAX_MAC_CATEGORY_INDEX) {
head->r.index++;
head->r.step = 1;
}
break;
}
if (tomoyo_flush(head))
goto next;
}
/**
* tomoyo_same_manager - Check for duplicated "struct tomoyo_manager" entry.
*
* @a: Pointer to "struct tomoyo_acl_head".
* @b: Pointer to "struct tomoyo_acl_head".
*
* Returns true if @a == @b, false otherwise.
*/
static bool tomoyo_same_manager(const struct tomoyo_acl_head *a,
const struct tomoyo_acl_head *b)
{
return container_of(a, struct tomoyo_manager, head)->manager ==
container_of(b, struct tomoyo_manager, head)->manager;
}
/**
* tomoyo_update_manager_entry - Add a manager entry.
*
* @manager: The path to manager or the domainname.
* @is_delete: True if it is a delete request.
*
* Returns 0 on success, negative value otherwise.
*
* Caller holds tomoyo_read_lock().
*/
static int tomoyo_update_manager_entry(const char *manager,
const bool is_delete)
{
struct tomoyo_manager e = { };
struct tomoyo_acl_param param = {
/* .ns = &tomoyo_kernel_namespace, */
.is_delete = is_delete,
.list = &tomoyo_kernel_namespace.policy_list[TOMOYO_ID_MANAGER],
};
int error = is_delete ? -ENOENT : -ENOMEM;
if (!tomoyo_correct_domain(manager) &&
!tomoyo_correct_word(manager))
return -EINVAL;
e.manager = tomoyo_get_name(manager);
if (e.manager) {
error = tomoyo_update_policy(&e.head, sizeof(e), ¶m,
tomoyo_same_manager);
tomoyo_put_name(e.manager);
}
return error;
}
/**
* tomoyo_write_manager - Write manager policy.
*
* @head: Pointer to "struct tomoyo_io_buffer".
*
* Returns 0 on success, negative value otherwise.
*
* Caller holds tomoyo_read_lock().
*/
static int tomoyo_write_manager(struct tomoyo_io_buffer *head)
{
char *data = head->write_buf;
if (!strcmp(data, "manage_by_non_root")) {
tomoyo_manage_by_non_root = !head->w.is_delete;
return 0;
}
return tomoyo_update_manager_entry(data, head->w.is_delete);
}
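/*
 * Illustrative manager policy lines (a minimal sketch; the program path is
 * hypothetical). Each non-keyword line registers a program or domainname
 * that may update policy via the /sys/kernel/security/tomoyo/ interface:
 *
 *   /usr/sbin/tomoyo-loadpolicy
 *   manage_by_non_root
 *
 * Prefixing a line with "delete " (stripped by tomoyo_parse_policy()) removes
 * the corresponding entry again.
 */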
/**
* tomoyo_read_manager - Read manager policy.
*
* @head: Pointer to "struct tomoyo_io_buffer".
*
* Caller holds tomoyo_read_lock().
*/
static void tomoyo_read_manager(struct tomoyo_io_buffer *head)
{
if (head->r.eof)
return;
list_for_each_cookie(head->r.acl, &tomoyo_kernel_namespace.policy_list[TOMOYO_ID_MANAGER]) {
struct tomoyo_manager *ptr =
list_entry(head->r.acl, typeof(*ptr), head.list);
if (ptr->head.is_deleted)
continue;
if (!tomoyo_flush(head))
return;
tomoyo_set_string(head, ptr->manager->name);
tomoyo_set_lf(head);
}
head->r.eof = true;
}
/**
* tomoyo_manager - Check whether the current process is a policy manager.
*
* Returns true if the current process is permitted to modify policy
* via /sys/kernel/security/tomoyo/ interface.
*
* Caller holds tomoyo_read_lock().
*/
static bool tomoyo_manager(void)
{
struct tomoyo_manager *ptr;
const char *exe;
const struct task_struct *task = current;
const struct tomoyo_path_info *domainname = tomoyo_domain()->domainname;
bool found = IS_ENABLED(CONFIG_SECURITY_TOMOYO_INSECURE_BUILTIN_SETTING);
if (!tomoyo_policy_loaded)
return true;
if (!tomoyo_manage_by_non_root &&
(!uid_eq(task->cred->uid, GLOBAL_ROOT_UID) ||
!uid_eq(task->cred->euid, GLOBAL_ROOT_UID)))
return false;
exe = tomoyo_get_exe();
if (!exe)
return false;
list_for_each_entry_rcu(ptr, &tomoyo_kernel_namespace.policy_list[TOMOYO_ID_MANAGER], head.list,
srcu_read_lock_held(&tomoyo_ss)) {
if (!ptr->head.is_deleted &&
(!tomoyo_pathcmp(domainname, ptr->manager) ||
!strcmp(exe, ptr->manager->name))) {
found = true;
break;
}
}
if (!found) { /* Reduce error messages. */
static pid_t last_pid;
const pid_t pid = current->pid;
if (last_pid != pid) {
pr_warn("%s ( %s ) is not permitted to update policies.\n",
domainname->name, exe);
last_pid = pid;
}
}
kfree(exe);
return found;
}
static struct tomoyo_domain_info *tomoyo_find_domain_by_qid
(unsigned int serial);
/**
* tomoyo_select_domain - Parse select command.
*
* @head: Pointer to "struct tomoyo_io_buffer".
* @data: String to parse.
*
* Returns true on success, false otherwise.
*
* Caller holds tomoyo_read_lock().
*/
static bool tomoyo_select_domain(struct tomoyo_io_buffer *head,
const char *data)
{
unsigned int pid;
struct tomoyo_domain_info *domain = NULL;
bool global_pid = false;
if (strncmp(data, "select ", 7))
return false;
data += 7;
if (sscanf(data, "pid=%u", &pid) == 1 ||
(global_pid = true, sscanf(data, "global-pid=%u", &pid) == 1)) {
struct task_struct *p;
rcu_read_lock();
if (global_pid)
p = find_task_by_pid_ns(pid, &init_pid_ns);
else
p = find_task_by_vpid(pid);
if (p)
domain = tomoyo_task(p)->domain_info;
rcu_read_unlock();
} else if (!strncmp(data, "domain=", 7)) {
if (tomoyo_domain_def(data + 7))
domain = tomoyo_find_domain(data + 7);
} else if (sscanf(data, "Q=%u", &pid) == 1) {
domain = tomoyo_find_domain_by_qid(pid);
} else
return false;
head->w.domain = domain;
/* Accessing read_buf is safe because head->io_sem is held. */
if (!head->read_buf)
return true; /* Do nothing if open(O_WRONLY). */
memset(&head->r, 0, sizeof(head->r));
head->r.print_this_domain_only = true;
if (domain)
head->r.domain = &domain->list;
else
head->r.eof = true;
tomoyo_io_printf(head, "# select %s\n", data);
if (domain && domain->is_deleted)
tomoyo_io_printf(head, "# This is a deleted domain.\n");
return true;
}
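/*
 * Illustrative "select" lines accepted above (a minimal sketch; the pid,
 * domainname and query number are hypothetical):
 *
 *   select pid=1234
 *   select global-pid=1234
 *   select domain=<kernel> /usr/sbin/sshd
 *   select Q=42
 *
 * The pid=/global-pid= forms resolve a task's current domain, domain= looks
 * the domain up by name, and Q= resolves the domain attached to a pending
 * query via tomoyo_find_domain_by_qid().
 */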
/**
* tomoyo_same_task_acl - Check for duplicated "struct tomoyo_task_acl" entry.
*
* @a: Pointer to "struct tomoyo_acl_info".
* @b: Pointer to "struct tomoyo_acl_info".
*
* Returns true if @a == @b, false otherwise.
*/
static bool tomoyo_same_task_acl(const struct tomoyo_acl_info *a,
const struct tomoyo_acl_info *b)
{
const struct tomoyo_task_acl *p1 = container_of(a, typeof(*p1), head);
const struct tomoyo_task_acl *p2 = container_of(b, typeof(*p2), head);
return p1->domainname == p2->domainname;
}
/**
* tomoyo_write_task - Update task related list.
*
* @param: Pointer to "struct tomoyo_acl_param".
*
* Returns 0 on success, negative value otherwise.
*
* Caller holds tomoyo_read_lock().
*/
static int tomoyo_write_task(struct tomoyo_acl_param *param)
{
int error = -EINVAL;
if (tomoyo_str_starts(¶m->data, "manual_domain_transition ")) {
struct tomoyo_task_acl e = {
.head.type = TOMOYO_TYPE_MANUAL_TASK_ACL,
.domainname = tomoyo_get_domainname(param),
};
if (e.domainname)
error = tomoyo_update_domain(&e.head, sizeof(e), param,
tomoyo_same_task_acl,
NULL);
tomoyo_put_name(e.domainname);
}
return error;
}
/**
* tomoyo_delete_domain - Delete a domain.
*
* @domainname: The name of domain.
*
* Returns 0 on success, negative value otherwise.
*
* Caller holds tomoyo_read_lock().
*/
static int tomoyo_delete_domain(char *domainname)
{
struct tomoyo_domain_info *domain;
struct tomoyo_path_info name;
name.name = domainname;
tomoyo_fill_path_info(&name);
if (mutex_lock_interruptible(&tomoyo_policy_lock))
return -EINTR;
/* Is there an active domain? */
list_for_each_entry_rcu(domain, &tomoyo_domain_list, list,
srcu_read_lock_held(&tomoyo_ss)) {
/* Never delete tomoyo_kernel_domain */
if (domain == &tomoyo_kernel_domain)
continue;
if (domain->is_deleted ||
tomoyo_pathcmp(domain->domainname, &name))
continue;
domain->is_deleted = true;
break;
}
mutex_unlock(&tomoyo_policy_lock);
return 0;
}
/**
* tomoyo_write_domain2 - Write domain policy.
*
* @ns: Pointer to "struct tomoyo_policy_namespace".
* @list: Pointer to "struct list_head".
* @data: Policy to be interpreted.
* @is_delete: True if it is a delete request.
*
* Returns 0 on success, negative value otherwise.
*
* Caller holds tomoyo_read_lock().
*/
static int tomoyo_write_domain2(struct tomoyo_policy_namespace *ns,
struct list_head *list, char *data,
const bool is_delete)
{
struct tomoyo_acl_param param = {
.ns = ns,
.list = list,
.data = data,
.is_delete = is_delete,
};
static const struct {
const char *keyword;
int (*write)(struct tomoyo_acl_param *param);
} tomoyo_callback[5] = {
{ "file ", tomoyo_write_file },
{ "network inet ", tomoyo_write_inet_network },
{ "network unix ", tomoyo_write_unix_network },
{ "misc ", tomoyo_write_misc },
{ "task ", tomoyo_write_task },
};
u8 i;
for (i = 0; i < ARRAY_SIZE(tomoyo_callback); i++) {
if (!tomoyo_str_starts(¶m.data,
tomoyo_callback[i].keyword))
continue;
return tomoyo_callback[i].write(¶m);
}
return -EINVAL;
}
/* String table for domain flags. */
const char * const tomoyo_dif[TOMOYO_MAX_DOMAIN_INFO_FLAGS] = {
[TOMOYO_DIF_QUOTA_WARNED] = "quota_exceeded\n",
[TOMOYO_DIF_TRANSITION_FAILED] = "transition_failed\n",
};
/**
* tomoyo_write_domain - Write domain policy.
*
* @head: Pointer to "struct tomoyo_io_buffer".
*
* Returns 0 on success, negative value otherwise.
*
* Caller holds tomoyo_read_lock().
*/
static int tomoyo_write_domain(struct tomoyo_io_buffer *head)
{
char *data = head->write_buf;
struct tomoyo_policy_namespace *ns;
struct tomoyo_domain_info *domain = head->w.domain;
const bool is_delete = head->w.is_delete;
bool is_select = !is_delete && tomoyo_str_starts(&data, "select ");
unsigned int idx;
if (*data == '<') {
int ret = 0;
domain = NULL;
if (is_delete)
ret = tomoyo_delete_domain(data);
else if (is_select)
domain = tomoyo_find_domain(data);
else
domain = tomoyo_assign_domain(data, false);
head->w.domain = domain;
return ret;
}
if (!domain)
return -EINVAL;
ns = domain->ns;
if (sscanf(data, "use_profile %u", &idx) == 1
&& idx < TOMOYO_MAX_PROFILES) {
if (!tomoyo_policy_loaded || ns->profile_ptr[idx])
if (!is_delete)
domain->profile = (u8) idx;
return 0;
}
if (sscanf(data, "use_group %u\n", &idx) == 1
&& idx < TOMOYO_MAX_ACL_GROUPS) {
if (!is_delete)
set_bit(idx, domain->group);
else
clear_bit(idx, domain->group);
return 0;
}
for (idx = 0; idx < TOMOYO_MAX_DOMAIN_INFO_FLAGS; idx++) {
const char *cp = tomoyo_dif[idx];
if (strncmp(data, cp, strlen(cp) - 1))
continue;
domain->flags[idx] = !is_delete;
return 0;
}
return tomoyo_write_domain2(ns, &domain->acl_info_list, data,
is_delete);
}
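/*
 * Illustrative domain policy input handled above (a minimal sketch; the
 * domainname and path are hypothetical). A line starting with '<' selects or
 * creates the domain, and the following lines are applied to it:
 *
 *   <kernel> /usr/sbin/sshd
 *   use_profile 1
 *   use_group 0
 *   file execute /bin/sh
 *
 * The last line is dispatched to tomoyo_write_domain2() through the "file "
 * entry of its keyword table.
 */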
/**
* tomoyo_print_condition - Print condition part.
*
* @head: Pointer to "struct tomoyo_io_buffer".
* @cond: Pointer to "struct tomoyo_condition".
*
* Returns true on success, false otherwise.
*/
static bool tomoyo_print_condition(struct tomoyo_io_buffer *head,
const struct tomoyo_condition *cond)
{
switch (head->r.cond_step) {
case 0:
head->r.cond_index = 0;
head->r.cond_step++;
if (cond->transit) {
tomoyo_set_space(head);
tomoyo_set_string(head, cond->transit->name);
}
fallthrough;
case 1:
{
const u16 condc = cond->condc;
const struct tomoyo_condition_element *condp =
(typeof(condp)) (cond + 1);
const struct tomoyo_number_union *numbers_p =
(typeof(numbers_p)) (condp + condc);
const struct tomoyo_name_union *names_p =
(typeof(names_p))
(numbers_p + cond->numbers_count);
const struct tomoyo_argv *argv =
(typeof(argv)) (names_p + cond->names_count);
const struct tomoyo_envp *envp =
(typeof(envp)) (argv + cond->argc);
u16 skip;
for (skip = 0; skip < head->r.cond_index; skip++) {
const u8 left = condp->left;
const u8 right = condp->right;
condp++;
switch (left) {
case TOMOYO_ARGV_ENTRY:
argv++;
continue;
case TOMOYO_ENVP_ENTRY:
envp++;
continue;
case TOMOYO_NUMBER_UNION:
numbers_p++;
break;
}
switch (right) {
case TOMOYO_NAME_UNION:
names_p++;
break;
case TOMOYO_NUMBER_UNION:
numbers_p++;
break;
}
}
while (head->r.cond_index < condc) {
const u8 match = condp->equals;
const u8 left = condp->left;
const u8 right = condp->right;
if (!tomoyo_flush(head))
return false;
condp++;
head->r.cond_index++;
tomoyo_set_space(head);
switch (left) {
case TOMOYO_ARGV_ENTRY:
tomoyo_io_printf(head,
"exec.argv[%lu]%s=\"",
argv->index, argv->is_not ? "!" : "");
tomoyo_set_string(head,
argv->value->name);
tomoyo_set_string(head, "\"");
argv++;
continue;
case TOMOYO_ENVP_ENTRY:
tomoyo_set_string(head,
"exec.envp[\"");
tomoyo_set_string(head,
envp->name->name);
tomoyo_io_printf(head, "\"]%s=", envp->is_not ? "!" : "");
if (envp->value) {
tomoyo_set_string(head, "\"");
tomoyo_set_string(head, envp->value->name);
tomoyo_set_string(head, "\"");
} else {
tomoyo_set_string(head,
"NULL");
}
envp++;
continue;
case TOMOYO_NUMBER_UNION:
tomoyo_print_number_union_nospace
(head, numbers_p++);
break;
default:
tomoyo_set_string(head,
tomoyo_condition_keyword[left]);
break;
}
tomoyo_set_string(head, match ? "=" : "!=");
switch (right) {
case TOMOYO_NAME_UNION:
tomoyo_print_name_union_quoted
(head, names_p++);
break;
case TOMOYO_NUMBER_UNION:
tomoyo_print_number_union_nospace
(head, numbers_p++);
break;
default:
tomoyo_set_string(head,
tomoyo_condition_keyword[right]);
break;
}
}
}
head->r.cond_step++;
fallthrough;
case 2:
if (!tomoyo_flush(head))
break;
head->r.cond_step++;
fallthrough;
case 3:
if (cond->grant_log != TOMOYO_GRANTLOG_AUTO)
tomoyo_io_printf(head, " grant_log=%s",
str_yes_no(cond->grant_log ==
TOMOYO_GRANTLOG_YES));
tomoyo_set_lf(head);
return true;
}
return false;
}
/**
* tomoyo_set_group - Print "acl_group " header keyword and category name.
*
* @head: Pointer to "struct tomoyo_io_buffer".
* @category: Category name.
*
* Returns nothing.
*/
static void tomoyo_set_group(struct tomoyo_io_buffer *head,
const char *category)
{
if (head->type == TOMOYO_EXCEPTIONPOLICY) {
tomoyo_print_namespace(head);
tomoyo_io_printf(head, "acl_group %u ",
head->r.acl_group_index);
}
tomoyo_set_string(head, category);
}
/**
* tomoyo_print_entry - Print an ACL entry.
*
* @head: Pointer to "struct tomoyo_io_buffer".
* @acl: Pointer to an ACL entry.
*
* Returns true on success, false otherwise.
*/
static bool tomoyo_print_entry(struct tomoyo_io_buffer *head,
struct tomoyo_acl_info *acl)
{
const u8 acl_type = acl->type;
bool first = true;
u8 bit;
if (head->r.print_cond_part)
goto print_cond_part;
if (acl->is_deleted)
return true;
if (!tomoyo_flush(head))
return false;
else if (acl_type == TOMOYO_TYPE_PATH_ACL) {
struct tomoyo_path_acl *ptr =
container_of(acl, typeof(*ptr), head);
const u16 perm = ptr->perm;
for (bit = 0; bit < TOMOYO_MAX_PATH_OPERATION; bit++) {
if (!(perm & (1 << bit)))
continue;
if (head->r.print_transition_related_only &&
bit != TOMOYO_TYPE_EXECUTE)
continue;
if (first) {
tomoyo_set_group(head, "file ");
first = false;
} else {
tomoyo_set_slash(head);
}
tomoyo_set_string(head, tomoyo_path_keyword[bit]);
}
if (first)
return true;
tomoyo_print_name_union(head, &ptr->name);
} else if (acl_type == TOMOYO_TYPE_MANUAL_TASK_ACL) {
struct tomoyo_task_acl *ptr =
container_of(acl, typeof(*ptr), head);
tomoyo_set_group(head, "task ");
tomoyo_set_string(head, "manual_domain_transition ");
tomoyo_set_string(head, ptr->domainname->name);
} else if (head->r.print_transition_related_only) {
return true;
} else if (acl_type == TOMOYO_TYPE_PATH2_ACL) {
struct tomoyo_path2_acl *ptr =
container_of(acl, typeof(*ptr), head);
const u8 perm = ptr->perm;
for (bit = 0; bit < TOMOYO_MAX_PATH2_OPERATION; bit++) {
if (!(perm & (1 << bit)))
continue;
if (first) {
tomoyo_set_group(head, "file ");
first = false;
} else {
tomoyo_set_slash(head);
}
tomoyo_set_string(head, tomoyo_mac_keywords
[tomoyo_pp2mac[bit]]);
}
if (first)
return true;
tomoyo_print_name_union(head, &ptr->name1);
tomoyo_print_name_union(head, &ptr->name2);
} else if (acl_type == TOMOYO_TYPE_PATH_NUMBER_ACL) {
struct tomoyo_path_number_acl *ptr =
container_of(acl, typeof(*ptr), head);
const u8 perm = ptr->perm;
for (bit = 0; bit < TOMOYO_MAX_PATH_NUMBER_OPERATION; bit++) {
if (!(perm & (1 << bit)))
continue;
if (first) {
tomoyo_set_group(head, "file ");
first = false;
} else {
tomoyo_set_slash(head);
}
tomoyo_set_string(head, tomoyo_mac_keywords
[tomoyo_pn2mac[bit]]);
}
if (first)
return true;
tomoyo_print_name_union(head, &ptr->name);
tomoyo_print_number_union(head, &ptr->number);
} else if (acl_type == TOMOYO_TYPE_MKDEV_ACL) {
struct tomoyo_mkdev_acl *ptr =
container_of(acl, typeof(*ptr), head);
const u8 perm = ptr->perm;
for (bit = 0; bit < TOMOYO_MAX_MKDEV_OPERATION; bit++) {
if (!(perm & (1 << bit)))
continue;
if (first) {
tomoyo_set_group(head, "file ");
first = false;
} else {
tomoyo_set_slash(head);
}
tomoyo_set_string(head, tomoyo_mac_keywords
[tomoyo_pnnn2mac[bit]]);
}
if (first)
return true;
tomoyo_print_name_union(head, &ptr->name);
tomoyo_print_number_union(head, &ptr->mode);
tomoyo_print_number_union(head, &ptr->major);
tomoyo_print_number_union(head, &ptr->minor);
} else if (acl_type == TOMOYO_TYPE_INET_ACL) {
struct tomoyo_inet_acl *ptr =
container_of(acl, typeof(*ptr), head);
const u8 perm = ptr->perm;
for (bit = 0; bit < TOMOYO_MAX_NETWORK_OPERATION; bit++) {
if (!(perm & (1 << bit)))
continue;
if (first) {
tomoyo_set_group(head, "network inet ");
tomoyo_set_string(head, tomoyo_proto_keyword
[ptr->protocol]);
tomoyo_set_space(head);
first = false;
} else {
tomoyo_set_slash(head);
}
tomoyo_set_string(head, tomoyo_socket_keyword[bit]);
}
if (first)
return true;
tomoyo_set_space(head);
if (ptr->address.group) {
tomoyo_set_string(head, "@");
tomoyo_set_string(head, ptr->address.group->group_name
->name);
} else {
char buf[128];
tomoyo_print_ip(buf, sizeof(buf), &ptr->address);
tomoyo_io_printf(head, "%s", buf);
}
tomoyo_print_number_union(head, &ptr->port);
} else if (acl_type == TOMOYO_TYPE_UNIX_ACL) {
struct tomoyo_unix_acl *ptr =
container_of(acl, typeof(*ptr), head);
const u8 perm = ptr->perm;
for (bit = 0; bit < TOMOYO_MAX_NETWORK_OPERATION; bit++) {
if (!(perm & (1 << bit)))
continue;
if (first) {
tomoyo_set_group(head, "network unix ");
tomoyo_set_string(head, tomoyo_proto_keyword
[ptr->protocol]);
tomoyo_set_space(head);
first = false;
} else {
tomoyo_set_slash(head);
}
tomoyo_set_string(head, tomoyo_socket_keyword[bit]);
}
if (first)
return true;
tomoyo_print_name_union(head, &ptr->name);
} else if (acl_type == TOMOYO_TYPE_MOUNT_ACL) {
struct tomoyo_mount_acl *ptr =
container_of(acl, typeof(*ptr), head);
tomoyo_set_group(head, "file mount");
tomoyo_print_name_union(head, &ptr->dev_name);
tomoyo_print_name_union(head, &ptr->dir_name);
tomoyo_print_name_union(head, &ptr->fs_type);
tomoyo_print_number_union(head, &ptr->flags);
} else if (acl_type == TOMOYO_TYPE_ENV_ACL) {
struct tomoyo_env_acl *ptr =
container_of(acl, typeof(*ptr), head);
tomoyo_set_group(head, "misc env ");
tomoyo_set_string(head, ptr->env->name);
}
if (acl->cond) {
head->r.print_cond_part = true;
head->r.cond_step = 0;
if (!tomoyo_flush(head))
return false;
print_cond_part:
if (!tomoyo_print_condition(head, acl->cond))
return false;
head->r.print_cond_part = false;
} else {
tomoyo_set_lf(head);
}
return true;
}
/**
* tomoyo_read_domain2 - Read domain policy.
*
* @head: Pointer to "struct tomoyo_io_buffer".
* @list: Pointer to "struct list_head".
*
* Caller holds tomoyo_read_lock().
*
* Returns true on success, false otherwise.
*/
static bool tomoyo_read_domain2(struct tomoyo_io_buffer *head,
struct list_head *list)
{
list_for_each_cookie(head->r.acl, list) {
struct tomoyo_acl_info *ptr =
list_entry(head->r.acl, typeof(*ptr), list);
if (!tomoyo_print_entry(head, ptr))
return false;
}
head->r.acl = NULL;
return true;
}
/**
* tomoyo_read_domain - Read domain policy.
*
* @head: Pointer to "struct tomoyo_io_buffer".
*
* Caller holds tomoyo_read_lock().
*/
static void tomoyo_read_domain(struct tomoyo_io_buffer *head)
{
if (head->r.eof)
return;
list_for_each_cookie(head->r.domain, &tomoyo_domain_list) {
struct tomoyo_domain_info *domain =
list_entry(head->r.domain, typeof(*domain), list);
u8 i;
switch (head->r.step) {
case 0:
if (domain->is_deleted &&
!head->r.print_this_domain_only)
continue;
/* Print domainname and flags. */
tomoyo_set_string(head, domain->domainname->name);
tomoyo_set_lf(head);
tomoyo_io_printf(head, "use_profile %u\n",
domain->profile);
for (i = 0; i < TOMOYO_MAX_DOMAIN_INFO_FLAGS; i++)
if (domain->flags[i])
tomoyo_set_string(head, tomoyo_dif[i]);
head->r.index = 0;
head->r.step++;
fallthrough;
case 1:
while (head->r.index < TOMOYO_MAX_ACL_GROUPS) {
i = head->r.index++;
if (!test_bit(i, domain->group))
continue;
tomoyo_io_printf(head, "use_group %u\n", i);
if (!tomoyo_flush(head))
return;
}
head->r.index = 0;
head->r.step++;
tomoyo_set_lf(head);
fallthrough;
case 2:
if (!tomoyo_read_domain2(head, &domain->acl_info_list))
return;
head->r.step++;
if (!tomoyo_set_lf(head))
return;
fallthrough;
case 3:
head->r.step = 0;
if (head->r.print_this_domain_only)
goto done;
}
}
done:
head->r.eof = true;
}
/**
 * tomoyo_write_pid - Specify PID to obtain domainname.
*
* @head: Pointer to "struct tomoyo_io_buffer".
*
* Returns 0.
*/
static int tomoyo_write_pid(struct tomoyo_io_buffer *head)
{
head->r.eof = false;
return 0;
}
/**
* tomoyo_read_pid - Get domainname of the specified PID.
*
* @head: Pointer to "struct tomoyo_io_buffer".
*
* Returns the domainname which the specified PID is in on success,
* empty string otherwise.
 * The PID is specified via tomoyo_write_pid() so that the user can obtain
 * the domainname through the read()/write() interface rather than the
 * sysctl() interface.
*/
static void tomoyo_read_pid(struct tomoyo_io_buffer *head)
{
char *buf = head->write_buf;
bool global_pid = false;
unsigned int pid;
struct task_struct *p;
struct tomoyo_domain_info *domain = NULL;
/* Accessing write_buf is safe because head->io_sem is held. */
if (!buf) {
head->r.eof = true;
return; /* Do nothing if open(O_RDONLY). */
}
if (head->r.w_pos || head->r.eof)
return;
head->r.eof = true;
if (tomoyo_str_starts(&buf, "global-pid "))
global_pid = true;
if (kstrtouint(buf, 10, &pid))
return;
rcu_read_lock();
if (global_pid)
p = find_task_by_pid_ns(pid, &init_pid_ns);
else
p = find_task_by_vpid(pid);
if (p)
domain = tomoyo_task(p)->domain_info;
rcu_read_unlock();
if (!domain)
return;
tomoyo_io_printf(head, "%u %u ", pid, domain->profile);
tomoyo_set_string(head, domain->domainname->name);
}
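/*
 * Illustrative .process_status exchange (a minimal sketch; the pid, profile
 * and domainname are hypothetical). Writing "1234" or "global-pid 1234"
 * selects the task, and a subsequent read returns:
 *
 *   1234 2 <kernel> /usr/sbin/sshd
 *
 * i.e. the pid, the domain's profile number and the domainname, as printed
 * by tomoyo_read_pid().
 */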
/* String table for domain transition control keywords. */
static const char *tomoyo_transition_type[TOMOYO_MAX_TRANSITION_TYPE] = {
[TOMOYO_TRANSITION_CONTROL_NO_RESET] = "no_reset_domain ",
[TOMOYO_TRANSITION_CONTROL_RESET] = "reset_domain ",
[TOMOYO_TRANSITION_CONTROL_NO_INITIALIZE] = "no_initialize_domain ",
[TOMOYO_TRANSITION_CONTROL_INITIALIZE] = "initialize_domain ",
[TOMOYO_TRANSITION_CONTROL_NO_KEEP] = "no_keep_domain ",
[TOMOYO_TRANSITION_CONTROL_KEEP] = "keep_domain ",
};
/* String table for grouping keywords. */
static const char *tomoyo_group_name[TOMOYO_MAX_GROUP] = {
[TOMOYO_PATH_GROUP] = "path_group ",
[TOMOYO_NUMBER_GROUP] = "number_group ",
[TOMOYO_ADDRESS_GROUP] = "address_group ",
};
/**
* tomoyo_write_exception - Write exception policy.
*
* @head: Pointer to "struct tomoyo_io_buffer".
*
* Returns 0 on success, negative value otherwise.
*
* Caller holds tomoyo_read_lock().
*/
static int tomoyo_write_exception(struct tomoyo_io_buffer *head)
{
const bool is_delete = head->w.is_delete;
struct tomoyo_acl_param param = {
.ns = head->w.ns,
.is_delete = is_delete,
.data = head->write_buf,
};
u8 i;
if (tomoyo_str_starts(¶m.data, "aggregator "))
return tomoyo_write_aggregator(¶m);
for (i = 0; i < TOMOYO_MAX_TRANSITION_TYPE; i++)
if (tomoyo_str_starts(¶m.data, tomoyo_transition_type[i]))
return tomoyo_write_transition_control(¶m, i);
for (i = 0; i < TOMOYO_MAX_GROUP; i++)
if (tomoyo_str_starts(¶m.data, tomoyo_group_name[i]))
return tomoyo_write_group(¶m, i);
if (tomoyo_str_starts(¶m.data, "acl_group ")) {
unsigned int group;
char *data;
group = simple_strtoul(param.data, &data, 10);
if (group < TOMOYO_MAX_ACL_GROUPS && *data++ == ' ')
return tomoyo_write_domain2
(head->w.ns, &head->w.ns->acl_group[group],
data, is_delete);
}
return -EINVAL;
}
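/*
 * Illustrative exception policy lines (a minimal sketch; the program paths
 * and group name are hypothetical):
 *
 *   aggregator proc:/self/exe /proc/self/exe
 *   initialize_domain /usr/sbin/sshd from any
 *   path_group SHELLS /bin/sh
 *   acl_group 0 file execute /bin/sh
 *
 * The leading keyword selects the handler: tomoyo_write_aggregator(),
 * tomoyo_write_transition_control(), tomoyo_write_group(), or, for
 * "acl_group <n>", tomoyo_write_domain2() against the namespace's
 * acl_group[n] list.
 */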
/**
* tomoyo_read_group - Read "struct tomoyo_path_group"/"struct tomoyo_number_group"/"struct tomoyo_address_group" list.
*
* @head: Pointer to "struct tomoyo_io_buffer".
* @idx: Index number.
*
* Returns true on success, false otherwise.
*
* Caller holds tomoyo_read_lock().
*/
static bool tomoyo_read_group(struct tomoyo_io_buffer *head, const int idx)
{
struct tomoyo_policy_namespace *ns =
container_of(head->r.ns, typeof(*ns), namespace_list);
struct list_head *list = &ns->group_list[idx];
list_for_each_cookie(head->r.group, list) {
struct tomoyo_group *group =
list_entry(head->r.group, typeof(*group), head.list);
list_for_each_cookie(head->r.acl, &group->member_list) {
struct tomoyo_acl_head *ptr =
list_entry(head->r.acl, typeof(*ptr), list);
if (ptr->is_deleted)
continue;
if (!tomoyo_flush(head))
return false;
tomoyo_print_namespace(head);
tomoyo_set_string(head, tomoyo_group_name[idx]);
tomoyo_set_string(head, group->group_name->name);
if (idx == TOMOYO_PATH_GROUP) {
tomoyo_set_space(head);
tomoyo_set_string(head, container_of
(ptr, struct tomoyo_path_group,
head)->member_name->name);
} else if (idx == TOMOYO_NUMBER_GROUP) {
tomoyo_print_number_union(head, &container_of
(ptr,
struct tomoyo_number_group,
head)->number);
} else if (idx == TOMOYO_ADDRESS_GROUP) {
char buffer[128];
struct tomoyo_address_group *member =
container_of(ptr, typeof(*member),
head);
tomoyo_print_ip(buffer, sizeof(buffer),
&member->address);
tomoyo_io_printf(head, " %s", buffer);
}
tomoyo_set_lf(head);
}
head->r.acl = NULL;
}
head->r.group = NULL;
return true;
}
/**
* tomoyo_read_policy - Read "struct tomoyo_..._entry" list.
*
* @head: Pointer to "struct tomoyo_io_buffer".
* @idx: Index number.
*
* Returns true on success, false otherwise.
*
* Caller holds tomoyo_read_lock().
*/
static bool tomoyo_read_policy(struct tomoyo_io_buffer *head, const int idx)
{
struct tomoyo_policy_namespace *ns =
container_of(head->r.ns, typeof(*ns), namespace_list);
struct list_head *list = &ns->policy_list[idx];
list_for_each_cookie(head->r.acl, list) {
struct tomoyo_acl_head *acl =
container_of(head->r.acl, typeof(*acl), list);
if (acl->is_deleted)
continue;
if (!tomoyo_flush(head))
return false;
switch (idx) {
case TOMOYO_ID_TRANSITION_CONTROL:
{
struct tomoyo_transition_control *ptr =
container_of(acl, typeof(*ptr), head);
tomoyo_print_namespace(head);
tomoyo_set_string(head, tomoyo_transition_type
[ptr->type]);
tomoyo_set_string(head, ptr->program ?
ptr->program->name : "any");
tomoyo_set_string(head, " from ");
tomoyo_set_string(head, ptr->domainname ?
ptr->domainname->name :
"any");
}
break;
case TOMOYO_ID_AGGREGATOR:
{
struct tomoyo_aggregator *ptr =
container_of(acl, typeof(*ptr), head);
tomoyo_print_namespace(head);
tomoyo_set_string(head, "aggregator ");
tomoyo_set_string(head,
ptr->original_name->name);
tomoyo_set_space(head);
tomoyo_set_string(head,
ptr->aggregated_name->name);
}
break;
default:
continue;
}
tomoyo_set_lf(head);
}
head->r.acl = NULL;
return true;
}
/**
* tomoyo_read_exception - Read exception policy.
*
* @head: Pointer to "struct tomoyo_io_buffer".
*
* Caller holds tomoyo_read_lock().
*/
static void tomoyo_read_exception(struct tomoyo_io_buffer *head)
{
struct tomoyo_policy_namespace *ns =
container_of(head->r.ns, typeof(*ns), namespace_list);
if (head->r.eof)
return;
while (head->r.step < TOMOYO_MAX_POLICY &&
tomoyo_read_policy(head, head->r.step))
head->r.step++;
if (head->r.step < TOMOYO_MAX_POLICY)
return;
while (head->r.step < TOMOYO_MAX_POLICY + TOMOYO_MAX_GROUP &&
tomoyo_read_group(head, head->r.step - TOMOYO_MAX_POLICY))
head->r.step++;
if (head->r.step < TOMOYO_MAX_POLICY + TOMOYO_MAX_GROUP)
return;
while (head->r.step < TOMOYO_MAX_POLICY + TOMOYO_MAX_GROUP
+ TOMOYO_MAX_ACL_GROUPS) {
head->r.acl_group_index = head->r.step - TOMOYO_MAX_POLICY
- TOMOYO_MAX_GROUP;
if (!tomoyo_read_domain2(head, &ns->acl_group
[head->r.acl_group_index]))
return;
head->r.step++;
}
head->r.eof = true;
}
/* Wait queue for kernel -> userspace notification. */
static DECLARE_WAIT_QUEUE_HEAD(tomoyo_query_wait);
/* Wait queue for userspace -> kernel notification. */
static DECLARE_WAIT_QUEUE_HEAD(tomoyo_answer_wait);
/* Structure for query. */
struct tomoyo_query {
struct list_head list;
struct tomoyo_domain_info *domain;
char *query;
size_t query_len;
unsigned int serial;
u8 timer;
u8 answer;
u8 retry;
};
/* The list for "struct tomoyo_query". */
static LIST_HEAD(tomoyo_query_list);
/* Lock for manipulating tomoyo_query_list. */
static DEFINE_SPINLOCK(tomoyo_query_list_lock);
/*
 * Number of "struct file" referring to the
 * /sys/kernel/security/tomoyo/query interface.
*/
static atomic_t tomoyo_query_observers = ATOMIC_INIT(0);
/**
* tomoyo_truncate - Truncate a line.
*
* @str: String to truncate.
*
* Returns length of truncated @str.
*/
static int tomoyo_truncate(char *str)
{
char *start = str;
while (*(unsigned char *) str > (unsigned char) ' ')
str++;
*str = '\0';
return strlen(start) + 1;
}
/**
* tomoyo_add_entry - Add an ACL to current thread's domain. Used by learning mode.
*
* @domain: Pointer to "struct tomoyo_domain_info".
* @header: Lines containing ACL.
*
* Returns nothing.
*/
static void tomoyo_add_entry(struct tomoyo_domain_info *domain, char *header)
{
char *buffer;
char *realpath = NULL;
char *argv0 = NULL;
char *symlink = NULL;
char *cp = strchr(header, '\n');
int len;
if (!cp)
return;
cp = strchr(cp + 1, '\n');
if (!cp)
return;
*cp++ = '\0';
len = strlen(cp) + 1;
/* strstr() will return NULL if ordering is wrong. */
if (*cp == 'f') {
argv0 = strstr(header, " argv[]={ \"");
if (argv0) {
argv0 += 10;
len += tomoyo_truncate(argv0) + 14;
}
realpath = strstr(header, " exec={ realpath=\"");
if (realpath) {
realpath += 8;
len += tomoyo_truncate(realpath) + 6;
}
symlink = strstr(header, " symlink.target=\"");
if (symlink)
len += tomoyo_truncate(symlink + 1) + 1;
}
buffer = kmalloc(len, GFP_NOFS);
if (!buffer)
return;
snprintf(buffer, len - 1, "%s", cp);
if (realpath)
tomoyo_addprintf(buffer, len, " exec.%s", realpath);
if (argv0)
tomoyo_addprintf(buffer, len, " exec.argv[0]=%s", argv0);
if (symlink)
tomoyo_addprintf(buffer, len, "%s", symlink);
tomoyo_normalize_line(buffer);
if (!tomoyo_write_domain2(domain->ns, &domain->acl_info_list, buffer,
false))
tomoyo_update_stat(TOMOYO_STAT_POLICY_UPDATES);
kfree(buffer);
}
/**
* tomoyo_supervisor - Ask for the supervisor's decision.
*
* @r: Pointer to "struct tomoyo_request_info".
* @fmt: The printf()'s format string, followed by parameters.
*
* Returns 0 if the supervisor decided to permit the access request which
* violated the policy in enforcing mode, TOMOYO_RETRY_REQUEST if the
* supervisor decided to retry the access request which violated the policy in
* enforcing mode, 0 if it is not in enforcing mode, -EPERM otherwise.
*/
int tomoyo_supervisor(struct tomoyo_request_info *r, const char *fmt, ...)
{
va_list args;
int error;
int len;
static unsigned int tomoyo_serial;
struct tomoyo_query entry = { };
bool quota_exceeded = false;
va_start(args, fmt);
len = vsnprintf(NULL, 0, fmt, args) + 1;
va_end(args);
/* Write /sys/kernel/security/tomoyo/audit. */
va_start(args, fmt);
tomoyo_write_log2(r, len, fmt, args);
va_end(args);
/* Nothing more to do if granted. */
if (r->granted)
return 0;
if (r->mode)
tomoyo_update_stat(r->mode);
switch (r->mode) {
case TOMOYO_CONFIG_ENFORCING:
error = -EPERM;
if (atomic_read(&tomoyo_query_observers))
break;
goto out;
case TOMOYO_CONFIG_LEARNING:
error = 0;
/* Check max_learning_entry parameter. */
if (tomoyo_domain_quota_is_ok(r))
break;
fallthrough;
default:
return 0;
}
/* Get message. */
va_start(args, fmt);
entry.query = tomoyo_init_log(r, len, fmt, args);
va_end(args);
if (!entry.query)
goto out;
entry.query_len = strlen(entry.query) + 1;
if (!error) {
tomoyo_add_entry(r->domain, entry.query);
goto out;
}
len = kmalloc_size_roundup(entry.query_len);
entry.domain = r->domain;
spin_lock(&tomoyo_query_list_lock);
if (tomoyo_memory_quota[TOMOYO_MEMORY_QUERY] &&
tomoyo_memory_used[TOMOYO_MEMORY_QUERY] + len
>= tomoyo_memory_quota[TOMOYO_MEMORY_QUERY]) {
quota_exceeded = true;
} else {
entry.serial = tomoyo_serial++;
entry.retry = r->retry;
tomoyo_memory_used[TOMOYO_MEMORY_QUERY] += len;
list_add_tail(&entry.list, &tomoyo_query_list);
}
spin_unlock(&tomoyo_query_list_lock);
if (quota_exceeded)
goto out;
/* Give 10 seconds for supervisor's opinion. */
while (entry.timer < 10) {
wake_up_all(&tomoyo_query_wait);
if (wait_event_interruptible_timeout
(tomoyo_answer_wait, entry.answer ||
!atomic_read(&tomoyo_query_observers), HZ))
break;
entry.timer++;
}
spin_lock(&tomoyo_query_list_lock);
list_del(&entry.list);
tomoyo_memory_used[TOMOYO_MEMORY_QUERY] -= len;
spin_unlock(&tomoyo_query_list_lock);
switch (entry.answer) {
case 3: /* Asked to retry by administrator. */
error = TOMOYO_RETRY_REQUEST;
r->retry++;
break;
case 1:
/* Granted by administrator. */
error = 0;
break;
default:
/* Timed out or rejected by administrator. */
break;
}
out:
kfree(entry.query);
return error;
}
/**
* tomoyo_find_domain_by_qid - Get domain by query id.
*
* @serial: Query ID assigned by tomoyo_supervisor().
*
* Returns pointer to "struct tomoyo_domain_info" if found, NULL otherwise.
*/
static struct tomoyo_domain_info *tomoyo_find_domain_by_qid
(unsigned int serial)
{
struct tomoyo_query *ptr;
struct tomoyo_domain_info *domain = NULL;
spin_lock(&tomoyo_query_list_lock);
list_for_each_entry(ptr, &tomoyo_query_list, list) {
if (ptr->serial != serial)
continue;
domain = ptr->domain;
break;
}
spin_unlock(&tomoyo_query_list_lock);
return domain;
}
/**
* tomoyo_poll_query - poll() for /sys/kernel/security/tomoyo/query.
*
* @file: Pointer to "struct file".
* @wait: Pointer to "poll_table".
*
* Returns EPOLLIN | EPOLLRDNORM when ready to read, 0 otherwise.
*
* Waits for access requests which violated policy in enforcing mode.
*/
static __poll_t tomoyo_poll_query(struct file *file, poll_table *wait)
{
if (!list_empty(&tomoyo_query_list))
return EPOLLIN | EPOLLRDNORM;
poll_wait(file, &tomoyo_query_wait, wait);
if (!list_empty(&tomoyo_query_list))
return EPOLLIN | EPOLLRDNORM;
return 0;
}
/**
* tomoyo_read_query - Read access requests which violated policy in enforcing mode.
*
* @head: Pointer to "struct tomoyo_io_buffer".
*/
static void tomoyo_read_query(struct tomoyo_io_buffer *head)
{
struct list_head *tmp;
unsigned int pos = 0;
size_t len = 0;
char *buf;
if (head->r.w_pos)
return;
kfree(head->read_buf);
head->read_buf = NULL;
spin_lock(&tomoyo_query_list_lock);
list_for_each(tmp, &tomoyo_query_list) {
struct tomoyo_query *ptr = list_entry(tmp, typeof(*ptr), list);
if (pos++ != head->r.query_index)
continue;
len = ptr->query_len;
break;
}
spin_unlock(&tomoyo_query_list_lock);
if (!len) {
head->r.query_index = 0;
return;
}
buf = kzalloc(len + 32, GFP_NOFS);
if (!buf)
return;
pos = 0;
spin_lock(&tomoyo_query_list_lock);
list_for_each(tmp, &tomoyo_query_list) {
struct tomoyo_query *ptr = list_entry(tmp, typeof(*ptr), list);
if (pos++ != head->r.query_index)
continue;
/*
 * Some queries may be skipped because tomoyo_query_list
 * can change between the two scans, but that is acceptable.
*/
if (len == ptr->query_len)
snprintf(buf, len + 31, "Q%u-%hu\n%s", ptr->serial,
ptr->retry, ptr->query);
break;
}
spin_unlock(&tomoyo_query_list_lock);
if (buf[0]) {
head->read_buf = buf;
head->r.w[head->r.w_pos++] = buf;
head->r.query_index++;
} else {
kfree(buf);
}
}
/**
* tomoyo_write_answer - Write the supervisor's decision.
*
* @head: Pointer to "struct tomoyo_io_buffer".
*
* Returns 0 on success, -EINVAL otherwise.
*/
static int tomoyo_write_answer(struct tomoyo_io_buffer *head)
{
char *data = head->write_buf;
struct list_head *tmp;
unsigned int serial;
unsigned int answer;
spin_lock(&tomoyo_query_list_lock);
list_for_each(tmp, &tomoyo_query_list) {
struct tomoyo_query *ptr = list_entry(tmp, typeof(*ptr), list);
ptr->timer = 0;
}
spin_unlock(&tomoyo_query_list_lock);
if (sscanf(data, "A%u=%u", &serial, &answer) != 2)
return -EINVAL;
spin_lock(&tomoyo_query_list_lock);
list_for_each(tmp, &tomoyo_query_list) {
struct tomoyo_query *ptr = list_entry(tmp, typeof(*ptr), list);
if (ptr->serial != serial)
continue;
ptr->answer = answer;
/* Remove from tomoyo_query_list. */
if (ptr->answer)
list_del_init(&ptr->list);
break;
}
spin_unlock(&tomoyo_query_list_lock);
return 0;
}
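/*
 * Illustrative query/answer exchange (a minimal sketch; the serial number is
 * hypothetical). Reading /sys/kernel/security/tomoyo/query yields entries
 * formatted by tomoyo_read_query() as "Q<serial>-<retry>" followed by the
 * query text, e.g. "Q42-0\n...". The supervisor replies by writing "A42=1"
 * to grant the request, "A42=3" to make the kernel retry it, or any other
 * non-zero value to reject it (see the switch in tomoyo_supervisor()).
 */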
/**
 * tomoyo_read_version - Get version.
*
* @head: Pointer to "struct tomoyo_io_buffer".
*
* Returns version information.
*/
static void tomoyo_read_version(struct tomoyo_io_buffer *head)
{
if (!head->r.eof) {
tomoyo_io_printf(head, "2.6.0");
head->r.eof = true;
}
}
/* String table for /sys/kernel/security/tomoyo/stat interface. */
static const char * const tomoyo_policy_headers[TOMOYO_MAX_POLICY_STAT] = {
[TOMOYO_STAT_POLICY_UPDATES] = "update:",
[TOMOYO_STAT_POLICY_LEARNING] = "violation in learning mode:",
[TOMOYO_STAT_POLICY_PERMISSIVE] = "violation in permissive mode:",
[TOMOYO_STAT_POLICY_ENFORCING] = "violation in enforcing mode:",
};
/* String table for /sys/kernel/security/tomoyo/stat interface. */
static const char * const tomoyo_memory_headers[TOMOYO_MAX_MEMORY_STAT] = {
[TOMOYO_MEMORY_POLICY] = "policy:",
[TOMOYO_MEMORY_AUDIT] = "audit log:",
[TOMOYO_MEMORY_QUERY] = "query message:",
};
/* Counter for number of updates. */
static atomic_t tomoyo_stat_updated[TOMOYO_MAX_POLICY_STAT];
/* Timestamp counter for last updated. */
static time64_t tomoyo_stat_modified[TOMOYO_MAX_POLICY_STAT];
/**
* tomoyo_update_stat - Update statistic counters.
*
* @index: Index for policy type.
*
* Returns nothing.
*/
void tomoyo_update_stat(const u8 index)
{
atomic_inc(&tomoyo_stat_updated[index]);
tomoyo_stat_modified[index] = ktime_get_real_seconds();
}
/**
* tomoyo_read_stat - Read statistic data.
*
* @head: Pointer to "struct tomoyo_io_buffer".
*
* Returns nothing.
*/
static void tomoyo_read_stat(struct tomoyo_io_buffer *head)
{
u8 i;
unsigned int total = 0;
if (head->r.eof)
return;
for (i = 0; i < TOMOYO_MAX_POLICY_STAT; i++) {
tomoyo_io_printf(head, "Policy %-30s %10u",
tomoyo_policy_headers[i],
atomic_read(&tomoyo_stat_updated[i]));
if (tomoyo_stat_modified[i]) {
struct tomoyo_time stamp;
tomoyo_convert_time(tomoyo_stat_modified[i], &stamp);
tomoyo_io_printf(head, " (Last: %04u/%02u/%02u %02u:%02u:%02u)",
stamp.year, stamp.month, stamp.day,
stamp.hour, stamp.min, stamp.sec);
}
tomoyo_set_lf(head);
}
for (i = 0; i < TOMOYO_MAX_MEMORY_STAT; i++) {
unsigned int used = tomoyo_memory_used[i];
total += used;
tomoyo_io_printf(head, "Memory used by %-22s %10u",
tomoyo_memory_headers[i], used);
used = tomoyo_memory_quota[i];
if (used)
tomoyo_io_printf(head, " (Quota: %10u)", used);
tomoyo_set_lf(head);
}
tomoyo_io_printf(head, "Total memory used: %10u\n",
total);
head->r.eof = true;
}
/**
* tomoyo_write_stat - Set memory quota.
*
* @head: Pointer to "struct tomoyo_io_buffer".
*
* Returns 0.
*/
static int tomoyo_write_stat(struct tomoyo_io_buffer *head)
{
char *data = head->write_buf;
u8 i;
if (tomoyo_str_starts(&data, "Memory used by "))
for (i = 0; i < TOMOYO_MAX_MEMORY_STAT; i++)
if (tomoyo_str_starts(&data, tomoyo_memory_headers[i]))
sscanf(data, "%u", &tomoyo_memory_quota[i]);
return 0;
}
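/*
 * Illustrative quota update (a minimal sketch; the byte count is
 * hypothetical). Writing a line in the same format that tomoyo_read_stat()
 * prints, for example:
 *
 *   Memory used by policy: 16777216
 *
 * sets tomoyo_memory_quota[TOMOYO_MEMORY_POLICY] to 16777216 bytes.
 */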
/**
* tomoyo_open_control - open() for /sys/kernel/security/tomoyo/ interface.
*
* @type: Type of interface.
* @file: Pointer to "struct file".
*
* Returns 0 on success, negative value otherwise.
*/
int tomoyo_open_control(const u8 type, struct file *file)
{
struct tomoyo_io_buffer *head = kzalloc(sizeof(*head), GFP_NOFS);
if (!head)
return -ENOMEM;
mutex_init(&head->io_sem);
head->type = type;
switch (type) {
case TOMOYO_DOMAINPOLICY:
/* /sys/kernel/security/tomoyo/domain_policy */
head->write = tomoyo_write_domain;
head->read = tomoyo_read_domain;
break;
case TOMOYO_EXCEPTIONPOLICY:
/* /sys/kernel/security/tomoyo/exception_policy */
head->write = tomoyo_write_exception;
head->read = tomoyo_read_exception;
break;
case TOMOYO_AUDIT:
/* /sys/kernel/security/tomoyo/audit */
head->poll = tomoyo_poll_log;
head->read = tomoyo_read_log;
break;
case TOMOYO_PROCESS_STATUS:
/* /sys/kernel/security/tomoyo/.process_status */
head->write = tomoyo_write_pid;
head->read = tomoyo_read_pid;
break;
case TOMOYO_VERSION:
/* /sys/kernel/security/tomoyo/version */
head->read = tomoyo_read_version;
head->readbuf_size = 128;
break;
case TOMOYO_STAT:
/* /sys/kernel/security/tomoyo/stat */
head->write = tomoyo_write_stat;
head->read = tomoyo_read_stat;
head->readbuf_size = 1024;
break;
case TOMOYO_PROFILE:
/* /sys/kernel/security/tomoyo/profile */
head->write = tomoyo_write_profile;
head->read = tomoyo_read_profile;
break;
case TOMOYO_QUERY: /* /sys/kernel/security/tomoyo/query */
head->poll = tomoyo_poll_query;
head->write = tomoyo_write_answer;
head->read = tomoyo_read_query;
break;
case TOMOYO_MANAGER:
/* /sys/kernel/security/tomoyo/manager */
head->write = tomoyo_write_manager;
head->read = tomoyo_read_manager;
break;
}
if (!(file->f_mode & FMODE_READ)) {
/*
* No need to allocate read_buf since it is not opened
* for reading.
*/
head->read = NULL;
head->poll = NULL;
} else if (!head->poll) {
/* Don't allocate read_buf for poll() access. */
if (!head->readbuf_size)
head->readbuf_size = 4096 * 2;
head->read_buf = kzalloc(head->readbuf_size, GFP_NOFS);
if (!head->read_buf) {
kfree(head);
return -ENOMEM;
}
}
if (!(file->f_mode & FMODE_WRITE)) {
/*
* No need to allocate write_buf since it is not opened
* for writing.
*/
head->write = NULL;
} else if (head->write) {
head->writebuf_size = 4096 * 2;
head->write_buf = kzalloc(head->writebuf_size, GFP_NOFS);
if (!head->write_buf) {
kfree(head->read_buf);
kfree(head);
return -ENOMEM;
}
}
/*
 * If the file is /sys/kernel/security/tomoyo/query, increment the
 * observer counter.
 * The observer counter is used by tomoyo_supervisor() to see if
 * there is a process monitoring /sys/kernel/security/tomoyo/query.
*/
if (type == TOMOYO_QUERY)
atomic_inc(&tomoyo_query_observers);
file->private_data = head;
tomoyo_notify_gc(head, true);
return 0;
}
/**
* tomoyo_poll_control - poll() for /sys/kernel/security/tomoyo/ interface.
*
* @file: Pointer to "struct file".
* @wait: Pointer to "poll_table". Maybe NULL.
*
* Returns EPOLLIN | EPOLLRDNORM | EPOLLOUT | EPOLLWRNORM if ready to read/write,
* EPOLLOUT | EPOLLWRNORM otherwise.
*/
__poll_t tomoyo_poll_control(struct file *file, poll_table *wait)
{
struct tomoyo_io_buffer *head = file->private_data;
if (head->poll)
return head->poll(file, wait) | EPOLLOUT | EPOLLWRNORM;
return EPOLLIN | EPOLLRDNORM | EPOLLOUT | EPOLLWRNORM;
}
/**
* tomoyo_set_namespace_cursor - Set namespace to read.
*
* @head: Pointer to "struct tomoyo_io_buffer".
*
* Returns nothing.
*/
static inline void tomoyo_set_namespace_cursor(struct tomoyo_io_buffer *head)
{
struct list_head *ns;
if (head->type != TOMOYO_EXCEPTIONPOLICY &&
head->type != TOMOYO_PROFILE)
return;
/*
 * If this is the first read, or the previous namespace has been fully
 * read and more namespaces remain, update the namespace cursor.
*/
ns = head->r.ns;
if (!ns || (head->r.eof && ns->next != &tomoyo_namespace_list)) {
/* Clearing is OK because tomoyo_flush() returned true. */
memset(&head->r, 0, sizeof(head->r));
head->r.ns = ns ? ns->next : tomoyo_namespace_list.next;
}
}
/**
* tomoyo_has_more_namespace - Check for unread namespaces.
*
* @head: Pointer to "struct tomoyo_io_buffer".
*
* Returns true if we have more entries to print, false otherwise.
*/
static inline bool tomoyo_has_more_namespace(struct tomoyo_io_buffer *head)
{
return (head->type == TOMOYO_EXCEPTIONPOLICY ||
head->type == TOMOYO_PROFILE) && head->r.eof &&
head->r.ns->next != &tomoyo_namespace_list;
}
/**
* tomoyo_read_control - read() for /sys/kernel/security/tomoyo/ interface.
*
* @head: Pointer to "struct tomoyo_io_buffer".
* @buffer: Pointer to buffer to write to.
* @buffer_len: Size of @buffer.
*
* Returns bytes read on success, negative value otherwise.
*/
ssize_t tomoyo_read_control(struct tomoyo_io_buffer *head, char __user *buffer,
const int buffer_len)
{
int len;
int idx;
if (!head->read)
return -EINVAL;
if (mutex_lock_interruptible(&head->io_sem))
return -EINTR;
head->read_user_buf = buffer;
head->read_user_buf_avail = buffer_len;
idx = tomoyo_read_lock();
if (tomoyo_flush(head))
/* Call the policy handler. */
do {
tomoyo_set_namespace_cursor(head);
head->read(head);
} while (tomoyo_flush(head) &&
tomoyo_has_more_namespace(head));
tomoyo_read_unlock(idx);
len = head->read_user_buf - buffer;
mutex_unlock(&head->io_sem);
return len;
}
/**
* tomoyo_parse_policy - Parse a policy line.
*
* @head: Pointer to "struct tomoyo_io_buffer".
* @line: Line to parse.
*
* Returns 0 on success, negative value otherwise.
*
* Caller holds tomoyo_read_lock().
*/
static int tomoyo_parse_policy(struct tomoyo_io_buffer *head, char *line)
{
/* Delete request? */
head->w.is_delete = !strncmp(line, "delete ", 7);
if (head->w.is_delete)
memmove(line, line + 7, strlen(line + 7) + 1);
/* Selecting namespace to update. */
if (head->type == TOMOYO_EXCEPTIONPOLICY ||
head->type == TOMOYO_PROFILE) {
if (*line == '<') {
char *cp = strchr(line, ' ');
if (cp) {
*cp++ = '\0';
head->w.ns = tomoyo_assign_namespace(line);
memmove(line, cp, strlen(cp) + 1);
} else
head->w.ns = NULL;
} else
head->w.ns = &tomoyo_kernel_namespace;
/* Don't allow updating if namespace is invalid. */
if (!head->w.ns)
return -ENOENT;
}
/* Do the update. */
return head->write(head);
}
/**
* tomoyo_write_control - write() for /sys/kernel/security/tomoyo/ interface.
*
* @head: Pointer to "struct tomoyo_io_buffer".
* @buffer: Pointer to buffer to read from.
* @buffer_len: Size of @buffer.
*
* Returns @buffer_len on success, negative value otherwise.
*/
ssize_t tomoyo_write_control(struct tomoyo_io_buffer *head,
const char __user *buffer, const int buffer_len)
{
int error = buffer_len;
size_t avail_len = buffer_len;
char *cp0 = head->write_buf;
int idx;
if (!head->write)
return -EINVAL;
if (mutex_lock_interruptible(&head->io_sem))
return -EINTR;
head->read_user_buf_avail = 0;
idx = tomoyo_read_lock();
/* Read a line and dispatch it to the policy handler. */
while (avail_len > 0) {
char c;
if (head->w.avail >= head->writebuf_size - 1) {
const int len = head->writebuf_size * 2;
char *cp = kzalloc(len, GFP_NOFS);
if (!cp) {
error = -ENOMEM;
break;
}
memmove(cp, cp0, head->w.avail);
kfree(cp0);
head->write_buf = cp;
cp0 = cp;
head->writebuf_size = len;
}
if (get_user(c, buffer)) {
error = -EFAULT;
break;
}
buffer++;
avail_len--;
cp0[head->w.avail++] = c;
if (c != '\n')
continue;
cp0[head->w.avail - 1] = '\0';
head->w.avail = 0;
tomoyo_normalize_line(cp0);
if (!strcmp(cp0, "reset")) {
head->w.ns = &tomoyo_kernel_namespace;
head->w.domain = NULL;
memset(&head->r, 0, sizeof(head->r));
continue;
}
/* Don't allow updating policies by non manager programs. */
switch (head->type) {
case TOMOYO_PROCESS_STATUS:
/* This does not write anything. */
break;
case TOMOYO_DOMAINPOLICY:
if (tomoyo_select_domain(head, cp0))
continue;
fallthrough;
case TOMOYO_EXCEPTIONPOLICY:
if (!strcmp(cp0, "select transition_only")) {
head->r.print_transition_related_only = true;
continue;
}
fallthrough;
default:
if (!tomoyo_manager()) {
error = -EPERM;
goto out;
}
}
switch (tomoyo_parse_policy(head, cp0)) {
case -EPERM:
error = -EPERM;
goto out;
case 0:
switch (head->type) {
case TOMOYO_DOMAINPOLICY:
case TOMOYO_EXCEPTIONPOLICY:
case TOMOYO_STAT:
case TOMOYO_PROFILE:
case TOMOYO_MANAGER:
tomoyo_update_stat(TOMOYO_STAT_POLICY_UPDATES);
break;
default:
break;
}
break;
}
}
out:
tomoyo_read_unlock(idx);
mutex_unlock(&head->io_sem);
return error;
}
/**
* tomoyo_close_control - close() for /sys/kernel/security/tomoyo/ interface.
*
* @head: Pointer to "struct tomoyo_io_buffer".
*/
void tomoyo_close_control(struct tomoyo_io_buffer *head)
{
/*
 * If the file is /sys/kernel/security/tomoyo/query, decrement the
* observer counter.
*/
if (head->type == TOMOYO_QUERY &&
atomic_dec_and_test(&tomoyo_query_observers))
wake_up_all(&tomoyo_answer_wait);
tomoyo_notify_gc(head, false);
}
/**
* tomoyo_check_profile - Check all profiles currently assigned to domains are defined.
*/
void tomoyo_check_profile(void)
{
struct tomoyo_domain_info *domain;
const int idx = tomoyo_read_lock();
tomoyo_policy_loaded = true;
pr_info("TOMOYO: 2.6.0\n");
list_for_each_entry_rcu(domain, &tomoyo_domain_list, list,
srcu_read_lock_held(&tomoyo_ss)) {
const u8 profile = domain->profile;
struct tomoyo_policy_namespace *ns = domain->ns;
if (ns->profile_version == 20110903) {
pr_info_once("Converting profile version from %u to %u.\n",
20110903, 20150505);
ns->profile_version = 20150505;
}
if (ns->profile_version != 20150505)
pr_err("Profile version %u is not supported.\n",
ns->profile_version);
else if (!ns->profile_ptr[profile])
pr_err("Profile %u (used by '%s') is not defined.\n",
profile, domain->domainname->name);
else
continue;
pr_err("Userland tools for TOMOYO 2.6 must be installed and policy must be initialized.\n");
pr_err("Please see https://tomoyo.osdn.jp/2.6/ for more information.\n");
panic("STOP!");
}
tomoyo_read_unlock(idx);
pr_info("Mandatory Access Control activated.\n");
}
/**
* tomoyo_load_builtin_policy - Load built-in policy.
*
* Returns nothing.
*/
void __init tomoyo_load_builtin_policy(void)
{
#ifdef CONFIG_SECURITY_TOMOYO_INSECURE_BUILTIN_SETTING
static char tomoyo_builtin_profile[] __initdata =
"PROFILE_VERSION=20150505\n"
"0-CONFIG={ mode=learning grant_log=no reject_log=yes }\n";
static char tomoyo_builtin_exception_policy[] __initdata =
"aggregator proc:/self/exe /proc/self/exe\n";
static char tomoyo_builtin_domain_policy[] __initdata = "";
static char tomoyo_builtin_manager[] __initdata = "";
static char tomoyo_builtin_stat[] __initdata = "";
#else
/*
* This include file is manually created and contains built-in policy
* named "tomoyo_builtin_profile", "tomoyo_builtin_exception_policy",
* "tomoyo_builtin_domain_policy", "tomoyo_builtin_manager",
* "tomoyo_builtin_stat" in the form of "static char [] __initdata".
*/
#include "builtin-policy.h"
#endif
u8 i;
const int idx = tomoyo_read_lock();
for (i = 0; i < 5; i++) {
struct tomoyo_io_buffer head = { };
char *start = "";
switch (i) {
case 0:
start = tomoyo_builtin_profile;
head.type = TOMOYO_PROFILE;
head.write = tomoyo_write_profile;
break;
case 1:
start = tomoyo_builtin_exception_policy;
head.type = TOMOYO_EXCEPTIONPOLICY;
head.write = tomoyo_write_exception;
break;
case 2:
start = tomoyo_builtin_domain_policy;
head.type = TOMOYO_DOMAINPOLICY;
head.write = tomoyo_write_domain;
break;
case 3:
start = tomoyo_builtin_manager;
head.type = TOMOYO_MANAGER;
head.write = tomoyo_write_manager;
break;
case 4:
start = tomoyo_builtin_stat;
head.type = TOMOYO_STAT;
head.write = tomoyo_write_stat;
break;
}
while (1) {
char *end = strchr(start, '\n');
if (!end)
break;
*end = '\0';
tomoyo_normalize_line(start);
head.write_buf = start;
tomoyo_parse_policy(&head, start);
start = end + 1;
}
}
tomoyo_read_unlock(idx);
#ifdef CONFIG_SECURITY_TOMOYO_OMIT_USERSPACE_LOADER
tomoyo_check_profile();
#endif
}
| linux-master | security/tomoyo/common.c |
// SPDX-License-Identifier: GPL-2.0
/*
* security/tomoyo/securityfs_if.c
*
* Copyright (C) 2005-2011 NTT DATA CORPORATION
*/
#include <linux/security.h>
#include "common.h"
/**
* tomoyo_check_task_acl - Check permission for task operation.
*
* @r: Pointer to "struct tomoyo_request_info".
* @ptr: Pointer to "struct tomoyo_acl_info".
*
* Returns true if granted, false otherwise.
*/
static bool tomoyo_check_task_acl(struct tomoyo_request_info *r,
const struct tomoyo_acl_info *ptr)
{
const struct tomoyo_task_acl *acl = container_of(ptr, typeof(*acl),
head);
return !tomoyo_pathcmp(r->param.task.domainname, acl->domainname);
}
/**
* tomoyo_write_self - write() for /sys/kernel/security/tomoyo/self_domain interface.
*
* @file: Pointer to "struct file".
* @buf: Domainname to transit to.
* @count: Size of @buf.
* @ppos: Unused.
*
* Returns @count on success, negative value otherwise.
*
 * If the domain transition was permitted but failed, this function returns
 * an error rather than terminating the current thread with SIGKILL.
*/
static ssize_t tomoyo_write_self(struct file *file, const char __user *buf,
size_t count, loff_t *ppos)
{
char *data;
int error;
if (!count || count >= TOMOYO_EXEC_TMPSIZE - 10)
return -ENOMEM;
data = memdup_user_nul(buf, count);
if (IS_ERR(data))
return PTR_ERR(data);
tomoyo_normalize_line(data);
if (tomoyo_correct_domain(data)) {
const int idx = tomoyo_read_lock();
struct tomoyo_path_info name;
struct tomoyo_request_info r;
name.name = data;
tomoyo_fill_path_info(&name);
/* Check "task manual_domain_transition" permission. */
tomoyo_init_request_info(&r, NULL, TOMOYO_MAC_FILE_EXECUTE);
r.param_type = TOMOYO_TYPE_MANUAL_TASK_ACL;
r.param.task.domainname = &name;
tomoyo_check_acl(&r, tomoyo_check_task_acl);
if (!r.granted)
error = -EPERM;
else {
struct tomoyo_domain_info *new_domain =
tomoyo_assign_domain(data, true);
if (!new_domain) {
error = -ENOENT;
} else {
struct tomoyo_task *s = tomoyo_task(current);
struct tomoyo_domain_info *old_domain =
s->domain_info;
s->domain_info = new_domain;
atomic_inc(&new_domain->users);
atomic_dec(&old_domain->users);
error = 0;
}
}
tomoyo_read_unlock(idx);
} else
error = -EINVAL;
kfree(data);
return error ? error : count;
}
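/*
 * Illustrative manual transition (a minimal sketch; the domainname is
 * hypothetical). If the calling thread's domain contains
 * "task manual_domain_transition <kernel> /usr/sbin/sshd", then writing
 * "<kernel> /usr/sbin/sshd" to /sys/kernel/security/tomoyo/self_domain moves
 * the thread into that domain; otherwise the write fails with -EPERM.
 */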
/**
* tomoyo_read_self - read() for /sys/kernel/security/tomoyo/self_domain interface.
*
* @file: Pointer to "struct file".
* @buf: Domainname which current thread belongs to.
* @count: Size of @buf.
* @ppos: Bytes read by now.
*
* Returns read size on success, negative value otherwise.
*/
static ssize_t tomoyo_read_self(struct file *file, char __user *buf,
size_t count, loff_t *ppos)
{
const char *domain = tomoyo_domain()->domainname->name;
loff_t len = strlen(domain);
loff_t pos = *ppos;
if (pos >= len || !count)
return 0;
len -= pos;
if (count < len)
len = count;
if (copy_to_user(buf, domain + pos, len))
return -EFAULT;
*ppos += len;
return len;
}
/* Operations for /sys/kernel/security/tomoyo/self_domain interface. */
static const struct file_operations tomoyo_self_operations = {
.write = tomoyo_write_self,
.read = tomoyo_read_self,
};
/**
* tomoyo_open - open() for /sys/kernel/security/tomoyo/ interface.
*
* @inode: Pointer to "struct inode".
* @file: Pointer to "struct file".
*
* Returns 0 on success, negative value otherwise.
*/
static int tomoyo_open(struct inode *inode, struct file *file)
{
const u8 key = (uintptr_t) file_inode(file)->i_private;
return tomoyo_open_control(key, file);
}
/**
* tomoyo_release - close() for /sys/kernel/security/tomoyo/ interface.
*
* @inode: Pointer to "struct inode".
* @file: Pointer to "struct file".
 *
 * Returns 0.
 */
static int tomoyo_release(struct inode *inode, struct file *file)
{
tomoyo_close_control(file->private_data);
return 0;
}
/**
* tomoyo_poll - poll() for /sys/kernel/security/tomoyo/ interface.
*
* @file: Pointer to "struct file".
* @wait: Pointer to "poll_table". Maybe NULL.
*
* Returns EPOLLIN | EPOLLRDNORM | EPOLLOUT | EPOLLWRNORM if ready to read/write,
* EPOLLOUT | EPOLLWRNORM otherwise.
*/
static __poll_t tomoyo_poll(struct file *file, poll_table *wait)
{
return tomoyo_poll_control(file, wait);
}
/**
* tomoyo_read - read() for /sys/kernel/security/tomoyo/ interface.
*
* @file: Pointer to "struct file".
* @buf: Pointer to buffer.
* @count: Size of @buf.
* @ppos: Unused.
*
* Returns bytes read on success, negative value otherwise.
*/
static ssize_t tomoyo_read(struct file *file, char __user *buf, size_t count,
loff_t *ppos)
{
return tomoyo_read_control(file->private_data, buf, count);
}
/**
* tomoyo_write - write() for /sys/kernel/security/tomoyo/ interface.
*
* @file: Pointer to "struct file".
* @buf: Pointer to buffer.
* @count: Size of @buf.
* @ppos: Unused.
*
* Returns @count on success, negative value otherwise.
*/
static ssize_t tomoyo_write(struct file *file, const char __user *buf,
size_t count, loff_t *ppos)
{
return tomoyo_write_control(file->private_data, buf, count);
}
/*
* tomoyo_operations is a "struct file_operations" which is used for handling
* /sys/kernel/security/tomoyo/ interface.
*
* Some files under /sys/kernel/security/tomoyo/ directory accept open(O_RDWR).
* See tomoyo_io_buffer for internals.
*/
static const struct file_operations tomoyo_operations = {
.open = tomoyo_open,
.release = tomoyo_release,
.poll = tomoyo_poll,
.read = tomoyo_read,
.write = tomoyo_write,
.llseek = noop_llseek,
};
/**
* tomoyo_create_entry - Create interface files under /sys/kernel/security/tomoyo/ directory.
*
* @name: The name of the interface file.
* @mode: The permission of the interface file.
* @parent: The parent directory.
* @key: Type of interface.
*
* Returns nothing.
*/
static void __init tomoyo_create_entry(const char *name, const umode_t mode,
struct dentry *parent, const u8 key)
{
securityfs_create_file(name, mode, parent, (void *) (uintptr_t) key,
&tomoyo_operations);
}
/**
* tomoyo_initerface_init - Initialize /sys/kernel/security/tomoyo/ interface.
*
* Returns 0.
*/
static int __init tomoyo_initerface_init(void)
{
struct tomoyo_domain_info *domain;
struct dentry *tomoyo_dir;
if (!tomoyo_enabled)
return 0;
domain = tomoyo_domain();
/* Don't create securityfs entries unless registered. */
if (domain != &tomoyo_kernel_domain)
return 0;
tomoyo_dir = securityfs_create_dir("tomoyo", NULL);
tomoyo_create_entry("query", 0600, tomoyo_dir,
TOMOYO_QUERY);
tomoyo_create_entry("domain_policy", 0600, tomoyo_dir,
TOMOYO_DOMAINPOLICY);
tomoyo_create_entry("exception_policy", 0600, tomoyo_dir,
TOMOYO_EXCEPTIONPOLICY);
tomoyo_create_entry("audit", 0400, tomoyo_dir,
TOMOYO_AUDIT);
tomoyo_create_entry(".process_status", 0600, tomoyo_dir,
TOMOYO_PROCESS_STATUS);
tomoyo_create_entry("stat", 0644, tomoyo_dir,
TOMOYO_STAT);
tomoyo_create_entry("profile", 0600, tomoyo_dir,
TOMOYO_PROFILE);
tomoyo_create_entry("manager", 0600, tomoyo_dir,
TOMOYO_MANAGER);
tomoyo_create_entry("version", 0400, tomoyo_dir,
TOMOYO_VERSION);
securityfs_create_file("self_domain", 0666, tomoyo_dir, NULL,
&tomoyo_self_operations);
tomoyo_load_builtin_policy();
return 0;
}
fs_initcall(tomoyo_initerface_init);
| linux-master | security/tomoyo/securityfs_if.c |
// SPDX-License-Identifier: GPL-2.0
/*
* security/tomoyo/memory.c
*
* Copyright (C) 2005-2011 NTT DATA CORPORATION
*/
#include <linux/hash.h>
#include <linux/slab.h>
#include "common.h"
/**
* tomoyo_warn_oom - Print out of memory warning message.
*
* @function: Function's name.
*/
void tomoyo_warn_oom(const char *function)
{
/* Reduce error messages. */
static pid_t tomoyo_last_pid;
const pid_t pid = current->pid;
if (tomoyo_last_pid != pid) {
pr_warn("ERROR: Out of memory at %s.\n", function);
tomoyo_last_pid = pid;
}
if (!tomoyo_policy_loaded)
panic("MAC Initialization failed.\n");
}
/* Memory currently used by policy/audit log/query. */
unsigned int tomoyo_memory_used[TOMOYO_MAX_MEMORY_STAT];
/* Memory quota for "policy"/"audit log"/"query". */
unsigned int tomoyo_memory_quota[TOMOYO_MAX_MEMORY_STAT];
/**
* tomoyo_memory_ok - Check memory quota.
*
* @ptr: Pointer to allocated memory.
*
* Returns true if @ptr is not NULL and the memory quota is not exceeded,
* false otherwise.
*
* Caller holds tomoyo_policy_lock mutex.
*/
bool tomoyo_memory_ok(void *ptr)
{
if (ptr) {
const size_t s = ksize(ptr);
tomoyo_memory_used[TOMOYO_MEMORY_POLICY] += s;
if (!tomoyo_memory_quota[TOMOYO_MEMORY_POLICY] ||
tomoyo_memory_used[TOMOYO_MEMORY_POLICY] <=
tomoyo_memory_quota[TOMOYO_MEMORY_POLICY])
return true;
tomoyo_memory_used[TOMOYO_MEMORY_POLICY] -= s;
}
tomoyo_warn_oom(__func__);
return false;
}
/**
* tomoyo_commit_ok - Check memory quota.
*
* @data: Data to copy from.
* @size: Size in byte.
*
* Returns pointer to allocated memory on success, NULL otherwise.
* @data is zero-cleared on success.
*
* Caller holds tomoyo_policy_lock mutex.
*/
void *tomoyo_commit_ok(void *data, const unsigned int size)
{
void *ptr = kzalloc(size, GFP_NOFS | __GFP_NOWARN);
if (tomoyo_memory_ok(ptr)) {
memmove(ptr, data, size);
memset(data, 0, size);
return ptr;
}
kfree(ptr);
return NULL;
}
/**
* tomoyo_get_group - Allocate memory for "struct tomoyo_path_group"/"struct tomoyo_number_group".
*
* @param: Pointer to "struct tomoyo_acl_param".
* @idx: Index number.
*
* Returns pointer to "struct tomoyo_group" on success, NULL otherwise.
*/
struct tomoyo_group *tomoyo_get_group(struct tomoyo_acl_param *param,
const u8 idx)
{
struct tomoyo_group e = { };
struct tomoyo_group *group = NULL;
struct list_head *list;
const char *group_name = tomoyo_read_token(param);
bool found = false;
if (!tomoyo_correct_word(group_name) || idx >= TOMOYO_MAX_GROUP)
return NULL;
e.group_name = tomoyo_get_name(group_name);
if (!e.group_name)
return NULL;
if (mutex_lock_interruptible(&tomoyo_policy_lock))
goto out;
list = ¶m->ns->group_list[idx];
list_for_each_entry(group, list, head.list) {
if (e.group_name != group->group_name ||
atomic_read(&group->head.users) == TOMOYO_GC_IN_PROGRESS)
continue;
atomic_inc(&group->head.users);
found = true;
break;
}
if (!found) {
struct tomoyo_group *entry = tomoyo_commit_ok(&e, sizeof(e));
if (entry) {
INIT_LIST_HEAD(&entry->member_list);
atomic_set(&entry->head.users, 1);
list_add_tail_rcu(&entry->head.list, list);
group = entry;
found = true;
}
}
mutex_unlock(&tomoyo_policy_lock);
out:
tomoyo_put_name(e.group_name);
return found ? group : NULL;
}
/*
* tomoyo_name_list is used for holding string data used by TOMOYO.
* Since same string data is likely used for multiple times (e.g.
* "/lib/libc-2.5.so"), TOMOYO shares string data in the form of
* "const struct tomoyo_path_info *".
*/
struct list_head tomoyo_name_list[TOMOYO_MAX_HASH];
/**
* tomoyo_get_name - Allocate permanent memory for string data.
*
* @name: The string to store into the permanent memory.
*
* Returns pointer to "struct tomoyo_path_info" on success, NULL otherwise.
*/
const struct tomoyo_path_info *tomoyo_get_name(const char *name)
{
struct tomoyo_name *ptr;
unsigned int hash;
int len;
struct list_head *head;
if (!name)
return NULL;
len = strlen(name) + 1;
hash = full_name_hash(NULL, (const unsigned char *) name, len - 1);
head = &tomoyo_name_list[hash_long(hash, TOMOYO_HASH_BITS)];
if (mutex_lock_interruptible(&tomoyo_policy_lock))
return NULL;
list_for_each_entry(ptr, head, head.list) {
if (hash != ptr->entry.hash || strcmp(name, ptr->entry.name) ||
atomic_read(&ptr->head.users) == TOMOYO_GC_IN_PROGRESS)
continue;
atomic_inc(&ptr->head.users);
goto out;
}
ptr = kzalloc(sizeof(*ptr) + len, GFP_NOFS | __GFP_NOWARN);
if (tomoyo_memory_ok(ptr)) {
ptr->entry.name = ((char *) ptr) + sizeof(*ptr);
memmove((char *) ptr->entry.name, name, len);
atomic_set(&ptr->head.users, 1);
tomoyo_fill_path_info(&ptr->entry);
list_add_tail(&ptr->head.list, head);
} else {
kfree(ptr);
ptr = NULL;
}
out:
mutex_unlock(&tomoyo_policy_lock);
return ptr ? &ptr->entry : NULL;
}
/* Initial namespace. */
struct tomoyo_policy_namespace tomoyo_kernel_namespace;
/**
* tomoyo_mm_init - Initialize mm related code.
*/
void __init tomoyo_mm_init(void)
{
int idx;
for (idx = 0; idx < TOMOYO_MAX_HASH; idx++)
INIT_LIST_HEAD(&tomoyo_name_list[idx]);
tomoyo_kernel_namespace.name = "<kernel>";
tomoyo_init_policy_namespace(&tomoyo_kernel_namespace);
tomoyo_kernel_domain.ns = &tomoyo_kernel_namespace;
INIT_LIST_HEAD(&tomoyo_kernel_domain.acl_info_list);
tomoyo_kernel_domain.domainname = tomoyo_get_name("<kernel>");
list_add_tail_rcu(&tomoyo_kernel_domain.list, &tomoyo_domain_list);
}
| linux-master | security/tomoyo/memory.c |
// SPDX-License-Identifier: GPL-2.0
/*
* security/tomoyo/tomoyo.c
*
* Copyright (C) 2005-2011 NTT DATA CORPORATION
*/
#include <linux/lsm_hooks.h>
#include "common.h"
/**
* tomoyo_domain - Get "struct tomoyo_domain_info" for current thread.
*
* Returns pointer to "struct tomoyo_domain_info" for current thread.
*/
struct tomoyo_domain_info *tomoyo_domain(void)
{
struct tomoyo_task *s = tomoyo_task(current);
if (s->old_domain_info && !current->in_execve) {
atomic_dec(&s->old_domain_info->users);
s->old_domain_info = NULL;
}
return s->domain_info;
}
/**
* tomoyo_cred_prepare - Target for security_prepare_creds().
*
* @new: Pointer to "struct cred".
* @old: Pointer to "struct cred".
* @gfp: Memory allocation flags.
*
* Returns 0.
*/
static int tomoyo_cred_prepare(struct cred *new, const struct cred *old,
gfp_t gfp)
{
/* Restore old_domain_info saved by previous execve() request. */
struct tomoyo_task *s = tomoyo_task(current);
if (s->old_domain_info && !current->in_execve) {
atomic_dec(&s->domain_info->users);
s->domain_info = s->old_domain_info;
s->old_domain_info = NULL;
}
return 0;
}
/**
* tomoyo_bprm_committed_creds - Target for security_bprm_committed_creds().
*
* @bprm: Pointer to "struct linux_binprm".
*/
static void tomoyo_bprm_committed_creds(struct linux_binprm *bprm)
{
/* Clear old_domain_info saved by execve() request. */
struct tomoyo_task *s = tomoyo_task(current);
atomic_dec(&s->old_domain_info->users);
s->old_domain_info = NULL;
}
#ifndef CONFIG_SECURITY_TOMOYO_OMIT_USERSPACE_LOADER
/**
* tomoyo_bprm_creds_for_exec - Target for security_bprm_creds_for_exec().
*
* @bprm: Pointer to "struct linux_binprm".
*
* Returns 0.
*/
static int tomoyo_bprm_creds_for_exec(struct linux_binprm *bprm)
{
/*
* Load policy if /sbin/tomoyo-init exists and /sbin/init is requested
* for the first time.
*/
if (!tomoyo_policy_loaded)
tomoyo_load_policy(bprm->filename);
return 0;
}
#endif
/**
* tomoyo_bprm_check_security - Target for security_bprm_check().
*
* @bprm: Pointer to "struct linux_binprm".
*
* Returns 0 on success, negative value otherwise.
*/
static int tomoyo_bprm_check_security(struct linux_binprm *bprm)
{
struct tomoyo_task *s = tomoyo_task(current);
/*
* Execute permission is checked against pathname passed to execve()
* using current domain.
*/
if (!s->old_domain_info) {
const int idx = tomoyo_read_lock();
const int err = tomoyo_find_next_domain(bprm);
tomoyo_read_unlock(idx);
return err;
}
/*
* Read permission is checked against interpreters using next domain.
*/
return tomoyo_check_open_permission(s->domain_info,
&bprm->file->f_path, O_RDONLY);
}
/**
* tomoyo_inode_getattr - Target for security_inode_getattr().
*
* @path: Pointer to "struct path".
*
* Returns 0 on success, negative value otherwise.
*/
static int tomoyo_inode_getattr(const struct path *path)
{
return tomoyo_path_perm(TOMOYO_TYPE_GETATTR, path, NULL);
}
/**
* tomoyo_path_truncate - Target for security_path_truncate().
*
* @path: Pointer to "struct path".
*
* Returns 0 on success, negative value otherwise.
*/
static int tomoyo_path_truncate(const struct path *path)
{
return tomoyo_path_perm(TOMOYO_TYPE_TRUNCATE, path, NULL);
}
/**
* tomoyo_file_truncate - Target for security_file_truncate().
*
* @file: Pointer to "struct file".
*
* Returns 0 on success, negative value otherwise.
*/
static int tomoyo_file_truncate(struct file *file)
{
return tomoyo_path_truncate(&file->f_path);
}
/**
* tomoyo_path_unlink - Target for security_path_unlink().
*
* @parent: Pointer to "struct path".
* @dentry: Pointer to "struct dentry".
*
* Returns 0 on success, negative value otherwise.
*/
static int tomoyo_path_unlink(const struct path *parent, struct dentry *dentry)
{
struct path path = { .mnt = parent->mnt, .dentry = dentry };
return tomoyo_path_perm(TOMOYO_TYPE_UNLINK, &path, NULL);
}
/**
* tomoyo_path_mkdir - Target for security_path_mkdir().
*
* @parent: Pointer to "struct path".
* @dentry: Pointer to "struct dentry".
* @mode: DAC permission mode.
*
* Returns 0 on success, negative value otherwise.
*/
static int tomoyo_path_mkdir(const struct path *parent, struct dentry *dentry,
umode_t mode)
{
struct path path = { .mnt = parent->mnt, .dentry = dentry };
return tomoyo_path_number_perm(TOMOYO_TYPE_MKDIR, &path,
mode & S_IALLUGO);
}
/**
* tomoyo_path_rmdir - Target for security_path_rmdir().
*
* @parent: Pointer to "struct path".
* @dentry: Pointer to "struct dentry".
*
* Returns 0 on success, negative value otherwise.
*/
static int tomoyo_path_rmdir(const struct path *parent, struct dentry *dentry)
{
struct path path = { .mnt = parent->mnt, .dentry = dentry };
return tomoyo_path_perm(TOMOYO_TYPE_RMDIR, &path, NULL);
}
/**
* tomoyo_path_symlink - Target for security_path_symlink().
*
* @parent: Pointer to "struct path".
* @dentry: Pointer to "struct dentry".
* @old_name: Symlink's content.
*
* Returns 0 on success, negative value otherwise.
*/
static int tomoyo_path_symlink(const struct path *parent, struct dentry *dentry,
const char *old_name)
{
struct path path = { .mnt = parent->mnt, .dentry = dentry };
return tomoyo_path_perm(TOMOYO_TYPE_SYMLINK, &path, old_name);
}
/**
* tomoyo_path_mknod - Target for security_path_mknod().
*
* @parent: Pointer to "struct path".
* @dentry: Pointer to "struct dentry".
* @mode: DAC permission mode.
* @dev: Device attributes.
*
* Returns 0 on success, negative value otherwise.
*/
static int tomoyo_path_mknod(const struct path *parent, struct dentry *dentry,
umode_t mode, unsigned int dev)
{
struct path path = { .mnt = parent->mnt, .dentry = dentry };
int type = TOMOYO_TYPE_CREATE;
const unsigned int perm = mode & S_IALLUGO;
switch (mode & S_IFMT) {
case S_IFCHR:
type = TOMOYO_TYPE_MKCHAR;
break;
case S_IFBLK:
type = TOMOYO_TYPE_MKBLOCK;
break;
default:
goto no_dev;
}
return tomoyo_mkdev_perm(type, &path, perm, dev);
no_dev:
switch (mode & S_IFMT) {
case S_IFIFO:
type = TOMOYO_TYPE_MKFIFO;
break;
case S_IFSOCK:
type = TOMOYO_TYPE_MKSOCK;
break;
}
return tomoyo_path_number_perm(type, &path, perm);
}
/**
* tomoyo_path_link - Target for security_path_link().
*
* @old_dentry: Pointer to "struct dentry".
* @new_dir: Pointer to "struct path".
* @new_dentry: Pointer to "struct dentry".
*
* Returns 0 on success, negative value otherwise.
*/
static int tomoyo_path_link(struct dentry *old_dentry, const struct path *new_dir,
struct dentry *new_dentry)
{
struct path path1 = { .mnt = new_dir->mnt, .dentry = old_dentry };
struct path path2 = { .mnt = new_dir->mnt, .dentry = new_dentry };
return tomoyo_path2_perm(TOMOYO_TYPE_LINK, &path1, &path2);
}
/**
* tomoyo_path_rename - Target for security_path_rename().
*
* @old_parent: Pointer to "struct path".
* @old_dentry: Pointer to "struct dentry".
* @new_parent: Pointer to "struct path".
* @new_dentry: Pointer to "struct dentry".
* @flags: Rename options.
*
* Returns 0 on success, negative value otherwise.
*/
static int tomoyo_path_rename(const struct path *old_parent,
struct dentry *old_dentry,
const struct path *new_parent,
struct dentry *new_dentry,
const unsigned int flags)
{
struct path path1 = { .mnt = old_parent->mnt, .dentry = old_dentry };
struct path path2 = { .mnt = new_parent->mnt, .dentry = new_dentry };
if (flags & RENAME_EXCHANGE) {
const int err = tomoyo_path2_perm(TOMOYO_TYPE_RENAME, &path2,
&path1);
if (err)
return err;
}
return tomoyo_path2_perm(TOMOYO_TYPE_RENAME, &path1, &path2);
}
/**
* tomoyo_file_fcntl - Target for security_file_fcntl().
*
* @file: Pointer to "struct file".
* @cmd: Command for fcntl().
* @arg: Argument for @cmd.
*
* Returns 0 on success, negative value otherwise.
*/
static int tomoyo_file_fcntl(struct file *file, unsigned int cmd,
unsigned long arg)
{
if (!(cmd == F_SETFL && ((arg ^ file->f_flags) & O_APPEND)))
return 0;
return tomoyo_check_open_permission(tomoyo_domain(), &file->f_path,
O_WRONLY | (arg & O_APPEND));
}
/**
* tomoyo_file_open - Target for security_file_open().
*
* @f: Pointer to "struct file".
*
* Returns 0 on success, negative value otherwise.
*/
static int tomoyo_file_open(struct file *f)
{
/* Don't check read permission here if called from execve(). */
if (current->in_execve)
return 0;
return tomoyo_check_open_permission(tomoyo_domain(), &f->f_path,
f->f_flags);
}
/**
* tomoyo_file_ioctl - Target for security_file_ioctl().
*
* @file: Pointer to "struct file".
* @cmd: Command for ioctl().
* @arg: Argument for @cmd.
*
* Returns 0 on success, negative value otherwise.
*/
static int tomoyo_file_ioctl(struct file *file, unsigned int cmd,
unsigned long arg)
{
return tomoyo_path_number_perm(TOMOYO_TYPE_IOCTL, &file->f_path, cmd);
}
/**
* tomoyo_path_chmod - Target for security_path_chmod().
*
* @path: Pointer to "struct path".
* @mode: DAC permission mode.
*
* Returns 0 on success, negative value otherwise.
*/
static int tomoyo_path_chmod(const struct path *path, umode_t mode)
{
return tomoyo_path_number_perm(TOMOYO_TYPE_CHMOD, path,
mode & S_IALLUGO);
}
/**
* tomoyo_path_chown - Target for security_path_chown().
*
* @path: Pointer to "struct path".
* @uid: Owner ID.
* @gid: Group ID.
*
* Returns 0 on success, negative value otherwise.
*/
static int tomoyo_path_chown(const struct path *path, kuid_t uid, kgid_t gid)
{
int error = 0;
if (uid_valid(uid))
error = tomoyo_path_number_perm(TOMOYO_TYPE_CHOWN, path,
from_kuid(&init_user_ns, uid));
if (!error && gid_valid(gid))
error = tomoyo_path_number_perm(TOMOYO_TYPE_CHGRP, path,
from_kgid(&init_user_ns, gid));
return error;
}
/**
* tomoyo_path_chroot - Target for security_path_chroot().
*
* @path: Pointer to "struct path".
*
* Returns 0 on success, negative value otherwise.
*/
static int tomoyo_path_chroot(const struct path *path)
{
return tomoyo_path_perm(TOMOYO_TYPE_CHROOT, path, NULL);
}
/**
* tomoyo_sb_mount - Target for security_sb_mount().
*
* @dev_name: Name of device file. May be NULL.
* @path: Pointer to "struct path".
* @type: Name of filesystem type. May be NULL.
* @flags: Mount options.
* @data: Optional data. May be NULL.
*
* Returns 0 on success, negative value otherwise.
*/
static int tomoyo_sb_mount(const char *dev_name, const struct path *path,
const char *type, unsigned long flags, void *data)
{
return tomoyo_mount_permission(dev_name, path, type, flags, data);
}
/**
* tomoyo_sb_umount - Target for security_sb_umount().
*
* @mnt: Pointer to "struct vfsmount".
* @flags: Unmount options.
*
* Returns 0 on success, negative value otherwise.
*/
static int tomoyo_sb_umount(struct vfsmount *mnt, int flags)
{
struct path path = { .mnt = mnt, .dentry = mnt->mnt_root };
return tomoyo_path_perm(TOMOYO_TYPE_UMOUNT, &path, NULL);
}
/**
* tomoyo_sb_pivotroot - Target for security_sb_pivotroot().
*
* @old_path: Pointer to "struct path".
* @new_path: Pointer to "struct path".
*
* Returns 0 on success, negative value otherwise.
*/
static int tomoyo_sb_pivotroot(const struct path *old_path, const struct path *new_path)
{
return tomoyo_path2_perm(TOMOYO_TYPE_PIVOT_ROOT, new_path, old_path);
}
/**
* tomoyo_socket_listen - Check permission for listen().
*
* @sock: Pointer to "struct socket".
* @backlog: Backlog parameter.
*
* Returns 0 on success, negative value otherwise.
*/
static int tomoyo_socket_listen(struct socket *sock, int backlog)
{
return tomoyo_socket_listen_permission(sock);
}
/**
* tomoyo_socket_connect - Check permission for connect().
*
* @sock: Pointer to "struct socket".
* @addr: Pointer to "struct sockaddr".
* @addr_len: Size of @addr.
*
* Returns 0 on success, negative value otherwise.
*/
static int tomoyo_socket_connect(struct socket *sock, struct sockaddr *addr,
int addr_len)
{
return tomoyo_socket_connect_permission(sock, addr, addr_len);
}
/**
* tomoyo_socket_bind - Check permission for bind().
*
* @sock: Pointer to "struct socket".
* @addr: Pointer to "struct sockaddr".
* @addr_len: Size of @addr.
*
* Returns 0 on success, negative value otherwise.
*/
static int tomoyo_socket_bind(struct socket *sock, struct sockaddr *addr,
int addr_len)
{
return tomoyo_socket_bind_permission(sock, addr, addr_len);
}
/**
* tomoyo_socket_sendmsg - Check permission for sendmsg().
*
* @sock: Pointer to "struct socket".
* @msg: Pointer to "struct msghdr".
* @size: Size of message.
*
* Returns 0 on success, negative value otherwise.
*/
static int tomoyo_socket_sendmsg(struct socket *sock, struct msghdr *msg,
int size)
{
return tomoyo_socket_sendmsg_permission(sock, msg, size);
}
struct lsm_blob_sizes tomoyo_blob_sizes __ro_after_init = {
.lbs_task = sizeof(struct tomoyo_task),
};
/**
* tomoyo_task_alloc - Target for security_task_alloc().
*
* @task: Pointer to "struct task_struct".
* @clone_flags: clone() flags.
*
* Returns 0.
*/
static int tomoyo_task_alloc(struct task_struct *task,
unsigned long clone_flags)
{
struct tomoyo_task *old = tomoyo_task(current);
struct tomoyo_task *new = tomoyo_task(task);
new->domain_info = old->domain_info;
atomic_inc(&new->domain_info->users);
new->old_domain_info = NULL;
return 0;
}
/**
* tomoyo_task_free - Target for security_task_free().
*
* @task: Pointer to "struct task_struct".
*/
static void tomoyo_task_free(struct task_struct *task)
{
struct tomoyo_task *s = tomoyo_task(task);
if (s->domain_info) {
atomic_dec(&s->domain_info->users);
s->domain_info = NULL;
}
if (s->old_domain_info) {
atomic_dec(&s->old_domain_info->users);
s->old_domain_info = NULL;
}
}
/*
* tomoyo_hooks is an array of "struct security_hook_list" entries which is
* used for registering TOMOYO.
*/
static struct security_hook_list tomoyo_hooks[] __ro_after_init = {
LSM_HOOK_INIT(cred_prepare, tomoyo_cred_prepare),
LSM_HOOK_INIT(bprm_committed_creds, tomoyo_bprm_committed_creds),
LSM_HOOK_INIT(task_alloc, tomoyo_task_alloc),
LSM_HOOK_INIT(task_free, tomoyo_task_free),
#ifndef CONFIG_SECURITY_TOMOYO_OMIT_USERSPACE_LOADER
LSM_HOOK_INIT(bprm_creds_for_exec, tomoyo_bprm_creds_for_exec),
#endif
LSM_HOOK_INIT(bprm_check_security, tomoyo_bprm_check_security),
LSM_HOOK_INIT(file_fcntl, tomoyo_file_fcntl),
LSM_HOOK_INIT(file_open, tomoyo_file_open),
LSM_HOOK_INIT(file_truncate, tomoyo_file_truncate),
LSM_HOOK_INIT(path_truncate, tomoyo_path_truncate),
LSM_HOOK_INIT(path_unlink, tomoyo_path_unlink),
LSM_HOOK_INIT(path_mkdir, tomoyo_path_mkdir),
LSM_HOOK_INIT(path_rmdir, tomoyo_path_rmdir),
LSM_HOOK_INIT(path_symlink, tomoyo_path_symlink),
LSM_HOOK_INIT(path_mknod, tomoyo_path_mknod),
LSM_HOOK_INIT(path_link, tomoyo_path_link),
LSM_HOOK_INIT(path_rename, tomoyo_path_rename),
LSM_HOOK_INIT(inode_getattr, tomoyo_inode_getattr),
LSM_HOOK_INIT(file_ioctl, tomoyo_file_ioctl),
LSM_HOOK_INIT(path_chmod, tomoyo_path_chmod),
LSM_HOOK_INIT(path_chown, tomoyo_path_chown),
LSM_HOOK_INIT(path_chroot, tomoyo_path_chroot),
LSM_HOOK_INIT(sb_mount, tomoyo_sb_mount),
LSM_HOOK_INIT(sb_umount, tomoyo_sb_umount),
LSM_HOOK_INIT(sb_pivotroot, tomoyo_sb_pivotroot),
LSM_HOOK_INIT(socket_bind, tomoyo_socket_bind),
LSM_HOOK_INIT(socket_connect, tomoyo_socket_connect),
LSM_HOOK_INIT(socket_listen, tomoyo_socket_listen),
LSM_HOOK_INIT(socket_sendmsg, tomoyo_socket_sendmsg),
};
/* Lock for GC. */
DEFINE_SRCU(tomoyo_ss);
int tomoyo_enabled __ro_after_init = 1;
/**
* tomoyo_init - Register TOMOYO Linux as a LSM module.
*
* Returns 0.
*/
static int __init tomoyo_init(void)
{
struct tomoyo_task *s = tomoyo_task(current);
/* register ourselves with the security framework */
security_add_hooks(tomoyo_hooks, ARRAY_SIZE(tomoyo_hooks), "tomoyo");
pr_info("TOMOYO Linux initialized\n");
s->domain_info = &tomoyo_kernel_domain;
atomic_inc(&tomoyo_kernel_domain.users);
s->old_domain_info = NULL;
tomoyo_mm_init();
return 0;
}
DEFINE_LSM(tomoyo) = {
.name = "tomoyo",
.enabled = &tomoyo_enabled,
.flags = LSM_FLAG_LEGACY_MAJOR,
.blobs = &tomoyo_blob_sizes,
.init = tomoyo_init,
};
| linux-master | security/tomoyo/tomoyo.c |
// SPDX-License-Identifier: GPL-2.0
/*
* security/tomoyo/util.c
*
* Copyright (C) 2005-2011 NTT DATA CORPORATION
*/
#include <linux/slab.h>
#include <linux/rculist.h>
#include "common.h"
/* Lock for protecting policy. */
DEFINE_MUTEX(tomoyo_policy_lock);
/* Has /sbin/init started? */
bool tomoyo_policy_loaded;
/*
* Mapping table from "enum tomoyo_mac_index" to
* "enum tomoyo_mac_category_index".
*/
const u8 tomoyo_index2category[TOMOYO_MAX_MAC_INDEX] = {
/* CONFIG::file group */
[TOMOYO_MAC_FILE_EXECUTE] = TOMOYO_MAC_CATEGORY_FILE,
[TOMOYO_MAC_FILE_OPEN] = TOMOYO_MAC_CATEGORY_FILE,
[TOMOYO_MAC_FILE_CREATE] = TOMOYO_MAC_CATEGORY_FILE,
[TOMOYO_MAC_FILE_UNLINK] = TOMOYO_MAC_CATEGORY_FILE,
[TOMOYO_MAC_FILE_GETATTR] = TOMOYO_MAC_CATEGORY_FILE,
[TOMOYO_MAC_FILE_MKDIR] = TOMOYO_MAC_CATEGORY_FILE,
[TOMOYO_MAC_FILE_RMDIR] = TOMOYO_MAC_CATEGORY_FILE,
[TOMOYO_MAC_FILE_MKFIFO] = TOMOYO_MAC_CATEGORY_FILE,
[TOMOYO_MAC_FILE_MKSOCK] = TOMOYO_MAC_CATEGORY_FILE,
[TOMOYO_MAC_FILE_TRUNCATE] = TOMOYO_MAC_CATEGORY_FILE,
[TOMOYO_MAC_FILE_SYMLINK] = TOMOYO_MAC_CATEGORY_FILE,
[TOMOYO_MAC_FILE_MKBLOCK] = TOMOYO_MAC_CATEGORY_FILE,
[TOMOYO_MAC_FILE_MKCHAR] = TOMOYO_MAC_CATEGORY_FILE,
[TOMOYO_MAC_FILE_LINK] = TOMOYO_MAC_CATEGORY_FILE,
[TOMOYO_MAC_FILE_RENAME] = TOMOYO_MAC_CATEGORY_FILE,
[TOMOYO_MAC_FILE_CHMOD] = TOMOYO_MAC_CATEGORY_FILE,
[TOMOYO_MAC_FILE_CHOWN] = TOMOYO_MAC_CATEGORY_FILE,
[TOMOYO_MAC_FILE_CHGRP] = TOMOYO_MAC_CATEGORY_FILE,
[TOMOYO_MAC_FILE_IOCTL] = TOMOYO_MAC_CATEGORY_FILE,
[TOMOYO_MAC_FILE_CHROOT] = TOMOYO_MAC_CATEGORY_FILE,
[TOMOYO_MAC_FILE_MOUNT] = TOMOYO_MAC_CATEGORY_FILE,
[TOMOYO_MAC_FILE_UMOUNT] = TOMOYO_MAC_CATEGORY_FILE,
[TOMOYO_MAC_FILE_PIVOT_ROOT] = TOMOYO_MAC_CATEGORY_FILE,
/* CONFIG::network group */
[TOMOYO_MAC_NETWORK_INET_STREAM_BIND] =
TOMOYO_MAC_CATEGORY_NETWORK,
[TOMOYO_MAC_NETWORK_INET_STREAM_LISTEN] =
TOMOYO_MAC_CATEGORY_NETWORK,
[TOMOYO_MAC_NETWORK_INET_STREAM_CONNECT] =
TOMOYO_MAC_CATEGORY_NETWORK,
[TOMOYO_MAC_NETWORK_INET_DGRAM_BIND] =
TOMOYO_MAC_CATEGORY_NETWORK,
[TOMOYO_MAC_NETWORK_INET_DGRAM_SEND] =
TOMOYO_MAC_CATEGORY_NETWORK,
[TOMOYO_MAC_NETWORK_INET_RAW_BIND] =
TOMOYO_MAC_CATEGORY_NETWORK,
[TOMOYO_MAC_NETWORK_INET_RAW_SEND] =
TOMOYO_MAC_CATEGORY_NETWORK,
[TOMOYO_MAC_NETWORK_UNIX_STREAM_BIND] =
TOMOYO_MAC_CATEGORY_NETWORK,
[TOMOYO_MAC_NETWORK_UNIX_STREAM_LISTEN] =
TOMOYO_MAC_CATEGORY_NETWORK,
[TOMOYO_MAC_NETWORK_UNIX_STREAM_CONNECT] =
TOMOYO_MAC_CATEGORY_NETWORK,
[TOMOYO_MAC_NETWORK_UNIX_DGRAM_BIND] =
TOMOYO_MAC_CATEGORY_NETWORK,
[TOMOYO_MAC_NETWORK_UNIX_DGRAM_SEND] =
TOMOYO_MAC_CATEGORY_NETWORK,
[TOMOYO_MAC_NETWORK_UNIX_SEQPACKET_BIND] =
TOMOYO_MAC_CATEGORY_NETWORK,
[TOMOYO_MAC_NETWORK_UNIX_SEQPACKET_LISTEN] =
TOMOYO_MAC_CATEGORY_NETWORK,
[TOMOYO_MAC_NETWORK_UNIX_SEQPACKET_CONNECT] =
TOMOYO_MAC_CATEGORY_NETWORK,
/* CONFIG::misc group */
[TOMOYO_MAC_ENVIRON] = TOMOYO_MAC_CATEGORY_MISC,
};
/**
* tomoyo_convert_time - Convert time64_t to YYYY/MM/DD hh:mm:ss.
*
* @time64: Seconds since 1970/01/01 00:00:00.
* @stamp: Pointer to "struct tomoyo_time".
*
* Returns nothing.
*/
void tomoyo_convert_time(time64_t time64, struct tomoyo_time *stamp)
{
struct tm tm;
time64_to_tm(time64, 0, &tm);
stamp->sec = tm.tm_sec;
stamp->min = tm.tm_min;
stamp->hour = tm.tm_hour;
stamp->day = tm.tm_mday;
stamp->month = tm.tm_mon + 1;
stamp->year = tm.tm_year + 1900;
}
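/* For illustration: a @time64 of 0 fills @stamp with 1970/01/01 00:00:00. */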
/**
* tomoyo_permstr - Find permission keywords.
*
* @string: String representation for permissions in foo/bar/buz format.
* @keyword: Keyword to find from @string.
*
* Returns true if @keyword was found in @string, false otherwise.
*
* This function assumes that strncmp(w1, w2, strlen(w1)) != 0 if w1 != w2.
*/
bool tomoyo_permstr(const char *string, const char *keyword)
{
const char *cp = strstr(string, keyword);
if (cp)
return cp == string || *(cp - 1) == '/';
return false;
}
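/*
* For illustration: tomoyo_permstr("read/write/execute", "write") is true,
* while tomoyo_permstr("read/write/execute", "append") is false.
*/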
/**
* tomoyo_read_token - Read a word from a line.
*
* @param: Pointer to "struct tomoyo_acl_param".
*
* Returns a word on success, "" otherwise.
*
* To allow the caller to skip a NULL check, this function returns "" rather
* than NULL if there are no more words to read.
*/
char *tomoyo_read_token(struct tomoyo_acl_param *param)
{
char *pos = param->data;
char *del = strchr(pos, ' ');
if (del)
*del++ = '\0';
else
del = pos + strlen(pos);
param->data = del;
return pos;
}
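/*
* For illustration: with @param->data == "file read /etc/fstab", the first
* call returns "file" and leaves @param->data at "read /etc/fstab", the next
* call returns "read", and calls made after the line is exhausted return "".
*/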
static bool tomoyo_correct_path2(const char *filename, const size_t len);
/**
* tomoyo_get_domainname - Read a domainname from a line.
*
* @param: Pointer to "struct tomoyo_acl_param".
*
* Returns a domainname on success, NULL otherwise.
*/
const struct tomoyo_path_info *tomoyo_get_domainname
(struct tomoyo_acl_param *param)
{
char *start = param->data;
char *pos = start;
while (*pos) {
if (*pos++ != ' ' ||
tomoyo_correct_path2(pos, strchrnul(pos, ' ') - pos))
continue;
*(pos - 1) = '\0';
break;
}
param->data = pos;
if (tomoyo_correct_domain(start))
return tomoyo_get_name(start);
return NULL;
}
/**
* tomoyo_parse_ulong - Parse an "unsigned long" value.
*
* @result: Pointer to "unsigned long".
* @str: Pointer to string to parse.
*
* Returns one of values in "enum tomoyo_value_type".
*
* The @str is updated to point to the first character after the value
* on success.
*/
u8 tomoyo_parse_ulong(unsigned long *result, char **str)
{
const char *cp = *str;
char *ep;
int base = 10;
if (*cp == '0') {
char c = *(cp + 1);
if (c == 'x' || c == 'X') {
base = 16;
cp += 2;
} else if (c >= '0' && c <= '7') {
base = 8;
cp++;
}
}
*result = simple_strtoul(cp, &ep, base);
if (cp == ep)
return TOMOYO_VALUE_TYPE_INVALID;
*str = ep;
switch (base) {
case 16:
return TOMOYO_VALUE_TYPE_HEXADECIMAL;
case 8:
return TOMOYO_VALUE_TYPE_OCTAL;
default:
return TOMOYO_VALUE_TYPE_DECIMAL;
}
}
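/*
* For illustration (example inputs only):
* "0x1f" -> *result = 31, TOMOYO_VALUE_TYPE_HEXADECIMAL
* "0755" -> *result = 493, TOMOYO_VALUE_TYPE_OCTAL
* "42" -> *result = 42, TOMOYO_VALUE_TYPE_DECIMAL
* "abc" -> TOMOYO_VALUE_TYPE_INVALID and *str is left unchanged
*/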
/**
* tomoyo_print_ulong - Print an "unsigned long" value.
*
* @buffer: Pointer to buffer.
* @buffer_len: Size of @buffer.
* @value: An "unsigned long" value.
* @type: Type of @value.
*
* Returns nothing.
*/
void tomoyo_print_ulong(char *buffer, const int buffer_len,
const unsigned long value, const u8 type)
{
if (type == TOMOYO_VALUE_TYPE_DECIMAL)
snprintf(buffer, buffer_len, "%lu", value);
else if (type == TOMOYO_VALUE_TYPE_OCTAL)
snprintf(buffer, buffer_len, "0%lo", value);
else if (type == TOMOYO_VALUE_TYPE_HEXADECIMAL)
snprintf(buffer, buffer_len, "0x%lX", value);
else
snprintf(buffer, buffer_len, "type(%u)", type);
}
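/*
* For illustration: the value 255 is printed as "255", "0377" or "0xFF"
* depending on whether @type is decimal, octal or hexadecimal.
*/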
/**
* tomoyo_parse_name_union - Parse a tomoyo_name_union.
*
* @param: Pointer to "struct tomoyo_acl_param".
* @ptr: Pointer to "struct tomoyo_name_union".
*
* Returns true on success, false otherwise.
*/
bool tomoyo_parse_name_union(struct tomoyo_acl_param *param,
struct tomoyo_name_union *ptr)
{
char *filename;
if (param->data[0] == '@') {
param->data++;
ptr->group = tomoyo_get_group(param, TOMOYO_PATH_GROUP);
return ptr->group != NULL;
}
filename = tomoyo_read_token(param);
if (!tomoyo_correct_word(filename))
return false;
ptr->filename = tomoyo_get_name(filename);
return ptr->filename != NULL;
}
/**
* tomoyo_parse_number_union - Parse a tomoyo_number_union.
*
* @param: Pointer to "struct tomoyo_acl_param".
* @ptr: Pointer to "struct tomoyo_number_union".
*
* Returns true on success, false otherwise.
*/
bool tomoyo_parse_number_union(struct tomoyo_acl_param *param,
struct tomoyo_number_union *ptr)
{
char *data;
u8 type;
unsigned long v;
memset(ptr, 0, sizeof(*ptr));
if (param->data[0] == '@') {
param->data++;
ptr->group = tomoyo_get_group(param, TOMOYO_NUMBER_GROUP);
return ptr->group != NULL;
}
data = tomoyo_read_token(param);
type = tomoyo_parse_ulong(&v, &data);
if (type == TOMOYO_VALUE_TYPE_INVALID)
return false;
ptr->values[0] = v;
ptr->value_type[0] = type;
if (!*data) {
ptr->values[1] = v;
ptr->value_type[1] = type;
return true;
}
if (*data++ != '-')
return false;
type = tomoyo_parse_ulong(&v, &data);
if (type == TOMOYO_VALUE_TYPE_INVALID || *data || ptr->values[0] > v)
return false;
ptr->values[1] = v;
ptr->value_type[1] = type;
return true;
}
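/*
* For illustration (the group name below is a made-up example):
* "1024" -> values[0] == values[1] == 1024
* "0-0xffff" -> values[0] == 0, values[1] == 65535 (the two bases may differ)
* "@PORTS" -> ptr->group refers to the number_group named PORTS
* "10-5" -> rejected because the lower bound exceeds the upper bound
*/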
/**
* tomoyo_byte_range - Check whether the string is a \ooo style octal value.
*
* @str: Pointer to the string.
*
* Returns true if @str is a \ooo style octal value, false otherwise.
*
* TOMOYO uses \ooo style representation for 0x01 - 0x20 and 0x7F - 0xFF.
* This function verifies that \ooo is in valid range.
*/
static inline bool tomoyo_byte_range(const char *str)
{
return *str >= '0' && *str++ <= '3' &&
*str >= '0' && *str++ <= '7' &&
*str >= '0' && *str <= '7';
}
/**
* tomoyo_alphabet_char - Check whether the character is an alphabet.
*
* @c: The character to check.
*
* Returns true if @c is an alphabet character, false otherwise.
*/
static inline bool tomoyo_alphabet_char(const char c)
{
return (c >= 'A' && c <= 'Z') || (c >= 'a' && c <= 'z');
}
/**
* tomoyo_make_byte - Make byte value from three octal characters.
*
* @c1: The first character.
* @c2: The second character.
* @c3: The third character.
*
* Returns byte value.
*/
static inline u8 tomoyo_make_byte(const u8 c1, const u8 c2, const u8 c3)
{
return ((c1 - '0') << 6) + ((c2 - '0') << 3) + (c3 - '0');
}
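/*
* For illustration: the escape "\040" passes tomoyo_byte_range() and
* tomoyo_make_byte('0', '4', '0') yields 0x20, i.e. the space character,
* which is never stored verbatim inside a TOMOYO pathname.
*/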
/**
* tomoyo_valid - Check whether the character is a valid char.
*
* @c: The character to check.
*
* Returns true if @c is a valid character, false otherwise.
*/
static inline bool tomoyo_valid(const unsigned char c)
{
return c > ' ' && c < 127;
}
/**
* tomoyo_invalid - Check whether the character is an invalid char.
*
* @c: The character to check.
*
* Returns true if @c is an invalid character, false otherwise.
*/
static inline bool tomoyo_invalid(const unsigned char c)
{
return c && (c <= ' ' || c >= 127);
}
/**
* tomoyo_str_starts - Check whether the given string starts with the given keyword.
*
* @src: Pointer to pointer to the string.
* @find: Pointer to the keyword.
*
* Returns true if @src starts with @find, false otherwise.
*
* The @src is updated to point to the first character after @find
* if @src starts with @find.
*/
bool tomoyo_str_starts(char **src, const char *find)
{
const int len = strlen(find);
char *tmp = *src;
if (strncmp(tmp, find, len))
return false;
tmp += len;
*src = tmp;
return true;
}
/**
* tomoyo_normalize_line - Format string.
*
* @buffer: The line to normalize.
*
* Leading and trailing whitespace is removed.
* Runs of whitespace are packed into a single space.
*
* Returns nothing.
*/
void tomoyo_normalize_line(unsigned char *buffer)
{
unsigned char *sp = buffer;
unsigned char *dp = buffer;
bool first = true;
while (tomoyo_invalid(*sp))
sp++;
while (*sp) {
if (!first)
*dp++ = ' ';
first = false;
while (tomoyo_valid(*sp))
*dp++ = *sp++;
while (tomoyo_invalid(*sp))
sp++;
}
*dp = '\0';
}
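/*
* For illustration: "  file   read\t/etc/fstab  " is rewritten in place as
* "file read /etc/fstab".
*/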
/**
* tomoyo_correct_word2 - Validate a string.
*
* @string: The string to check. May be non-'\0'-terminated.
* @len: Length of @string.
*
* Check whether the given string follows the naming rules.
* Returns true if @string follows the naming rules, false otherwise.
*/
static bool tomoyo_correct_word2(const char *string, size_t len)
{
u8 recursion = 20;
const char *const start = string;
bool in_repetition = false;
if (!len)
goto out;
while (len--) {
unsigned char c = *string++;
if (c == '\\') {
if (!len--)
goto out;
c = *string++;
if (c >= '0' && c <= '3') {
unsigned char d;
unsigned char e;
if (!len-- || !len--)
goto out;
d = *string++;
e = *string++;
if (d < '0' || d > '7' || e < '0' || e > '7')
goto out;
c = tomoyo_make_byte(c, d, e);
if (c <= ' ' || c >= 127)
continue;
goto out;
}
switch (c) {
case '\\': /* "\\" */
case '+': /* "\+" */
case '?': /* "\?" */
case 'x': /* "\x" */
case 'a': /* "\a" */
case '-': /* "\-" */
continue;
}
if (!recursion--)
goto out;
switch (c) {
case '*': /* "\*" */
case '@': /* "\@" */
case '$': /* "\$" */
case 'X': /* "\X" */
case 'A': /* "\A" */
continue;
case '{': /* "/\{" */
if (string - 3 < start || *(string - 3) != '/')
goto out;
in_repetition = true;
continue;
case '}': /* "\}/" */
if (*string != '/')
goto out;
if (!in_repetition)
goto out;
in_repetition = false;
continue;
}
goto out;
} else if (in_repetition && c == '/') {
goto out;
} else if (c <= ' ' || c >= 127) {
goto out;
}
}
if (in_repetition)
goto out;
return true;
out:
return false;
}
/**
* tomoyo_correct_word - Validate a string.
*
* @string: The string to check.
*
* Check whether the given string follows the naming rules.
* Returns true if @string follows the naming rules, false otherwise.
*/
bool tomoyo_correct_word(const char *string)
{
return tomoyo_correct_word2(string, strlen(string));
}
/**
* tomoyo_correct_path2 - Check whether the given pathname follows the naming rules.
*
* @filename: The pathname to check.
* @len: Length of @filename.
*
* Returns true if @filename follows the naming rules, false otherwise.
*/
static bool tomoyo_correct_path2(const char *filename, const size_t len)
{
const char *cp1 = memchr(filename, '/', len);
const char *cp2 = memchr(filename, '.', len);
return cp1 && (!cp2 || (cp1 < cp2)) && tomoyo_correct_word2(filename, len);
}
/**
* tomoyo_correct_path - Validate a pathname.
*
* @filename: The pathname to check.
*
* Check whether the given pathname follows the naming rules.
* Returns true if @filename follows the naming rules, false otherwise.
*/
bool tomoyo_correct_path(const char *filename)
{
return tomoyo_correct_path2(filename, strlen(filename));
}
/**
* tomoyo_correct_domain - Check whether the given domainname follows the naming rules.
*
* @domainname: The domainname to check.
*
* Returns true if @domainname follows the naming rules, false otherwise.
*/
bool tomoyo_correct_domain(const unsigned char *domainname)
{
if (!domainname || !tomoyo_domain_def(domainname))
return false;
domainname = strchr(domainname, ' ');
if (!domainname++)
return true;
while (1) {
const unsigned char *cp = strchr(domainname, ' ');
if (!cp)
break;
if (!tomoyo_correct_path2(domainname, cp - domainname))
return false;
domainname = cp + 1;
}
return tomoyo_correct_path(domainname);
}
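/*
* For illustration (example names only): "<kernel>" and
* "<kernel> /usr/sbin/sshd" are accepted, while "<kernel> sshd" is rejected
* because "sshd" is not a correct pathname (it contains no '/').
*/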
/**
* tomoyo_domain_def - Check whether the given token can be a domainname.
*
* @buffer: The token to check.
*
* Returns true if @buffer can possibly be a domainname, false otherwise.
*/
bool tomoyo_domain_def(const unsigned char *buffer)
{
const unsigned char *cp;
int len;
if (*buffer != '<')
return false;
cp = strchr(buffer, ' ');
if (!cp)
len = strlen(buffer);
else
len = cp - buffer;
if (buffer[len - 1] != '>' ||
!tomoyo_correct_word2(buffer + 1, len - 2))
return false;
return true;
}
/**
* tomoyo_find_domain - Find a domain by the given name.
*
* @domainname: The domainname to find.
*
* Returns pointer to "struct tomoyo_domain_info" if found, NULL otherwise.
*
* Caller holds tomoyo_read_lock().
*/
struct tomoyo_domain_info *tomoyo_find_domain(const char *domainname)
{
struct tomoyo_domain_info *domain;
struct tomoyo_path_info name;
name.name = domainname;
tomoyo_fill_path_info(&name);
list_for_each_entry_rcu(domain, &tomoyo_domain_list, list,
srcu_read_lock_held(&tomoyo_ss)) {
if (!domain->is_deleted &&
!tomoyo_pathcmp(&name, domain->domainname))
return domain;
}
return NULL;
}
/**
* tomoyo_const_part_length - Evaluate the initial length without a pattern in a token.
*
* @filename: The string to evaluate.
*
* Returns the initial length without a pattern in @filename.
*/
static int tomoyo_const_part_length(const char *filename)
{
char c;
int len = 0;
if (!filename)
return 0;
while ((c = *filename++) != '\0') {
if (c != '\\') {
len++;
continue;
}
c = *filename++;
switch (c) {
case '\\': /* "\\" */
len += 2;
continue;
case '0': /* "\ooo" */
case '1':
case '2':
case '3':
c = *filename++;
if (c < '0' || c > '7')
break;
c = *filename++;
if (c < '0' || c > '7')
break;
len += 4;
continue;
}
break;
}
return len;
}
/**
* tomoyo_fill_path_info - Fill in "struct tomoyo_path_info" members.
*
* @ptr: Pointer to "struct tomoyo_path_info" to fill in.
*
* The caller sets "struct tomoyo_path_info"->name.
*/
void tomoyo_fill_path_info(struct tomoyo_path_info *ptr)
{
const char *name = ptr->name;
const int len = strlen(name);
ptr->const_len = tomoyo_const_part_length(name);
ptr->is_dir = len && (name[len - 1] == '/');
ptr->is_patterned = (ptr->const_len < len);
ptr->hash = full_name_hash(NULL, name, len);
}
/**
* tomoyo_file_matches_pattern2 - Pattern matching without '/' character and "\-" pattern.
*
* @filename: The start of string to check.
* @filename_end: The end of string to check.
* @pattern: The start of pattern to compare.
* @pattern_end: The end of pattern to compare.
*
* Returns true if @filename matches @pattern, false otherwise.
*/
static bool tomoyo_file_matches_pattern2(const char *filename,
const char *filename_end,
const char *pattern,
const char *pattern_end)
{
while (filename < filename_end && pattern < pattern_end) {
char c;
int i;
int j;
if (*pattern != '\\') {
if (*filename++ != *pattern++)
return false;
continue;
}
c = *filename;
pattern++;
switch (*pattern) {
case '?':
if (c == '/') {
return false;
} else if (c == '\\') {
if (filename[1] == '\\')
filename++;
else if (tomoyo_byte_range(filename + 1))
filename += 3;
else
return false;
}
break;
case '\\':
if (c != '\\')
return false;
if (*++filename != '\\')
return false;
break;
case '+':
if (!isdigit(c))
return false;
break;
case 'x':
if (!isxdigit(c))
return false;
break;
case 'a':
if (!tomoyo_alphabet_char(c))
return false;
break;
case '0':
case '1':
case '2':
case '3':
if (c == '\\' && tomoyo_byte_range(filename + 1)
&& strncmp(filename + 1, pattern, 3) == 0) {
filename += 3;
pattern += 2;
break;
}
return false; /* Not matched. */
case '*':
case '@':
for (i = 0; i <= filename_end - filename; i++) {
if (tomoyo_file_matches_pattern2(
filename + i, filename_end,
pattern + 1, pattern_end))
return true;
c = filename[i];
if (c == '.' && *pattern == '@')
break;
if (c != '\\')
continue;
if (filename[i + 1] == '\\')
i++;
else if (tomoyo_byte_range(filename + i + 1))
i += 3;
else
break; /* Bad pattern. */
}
return false; /* Not matched. */
default:
j = 0;
c = *pattern;
if (c == '$') {
while (isdigit(filename[j]))
j++;
} else if (c == 'X') {
while (isxdigit(filename[j]))
j++;
} else if (c == 'A') {
while (tomoyo_alphabet_char(filename[j]))
j++;
}
for (i = 1; i <= j; i++) {
if (tomoyo_file_matches_pattern2(
filename + i, filename_end,
pattern + 1, pattern_end))
return true;
}
return false; /* Not matched or bad pattern. */
}
filename++;
pattern++;
}
while (*pattern == '\\' &&
(*(pattern + 1) == '*' || *(pattern + 1) == '@'))
pattern += 2;
return filename == filename_end && pattern == pattern_end;
}
/**
* tomoyo_file_matches_pattern - Pattern matching without '/' character.
*
* @filename: The start of string to check.
* @filename_end: The end of string to check.
* @pattern: The start of pattern to compare.
* @pattern_end: The end of pattern to compare.
*
* Returns true if @filename matches @pattern, false otherwise.
*/
static bool tomoyo_file_matches_pattern(const char *filename,
const char *filename_end,
const char *pattern,
const char *pattern_end)
{
const char *pattern_start = pattern;
bool first = true;
bool result;
while (pattern < pattern_end - 1) {
/* Split at "\-" pattern. */
if (*pattern++ != '\\' || *pattern++ != '-')
continue;
result = tomoyo_file_matches_pattern2(filename,
filename_end,
pattern_start,
pattern - 2);
if (first)
result = !result;
if (result)
return false;
first = false;
pattern_start = pattern;
}
result = tomoyo_file_matches_pattern2(filename, filename_end,
pattern_start, pattern_end);
return first ? result : !result;
}
/**
* tomoyo_path_matches_pattern2 - Do pathname pattern matching.
*
* @f: The start of string to check.
* @p: The start of pattern to compare.
*
* Returns true if @f matches @p, false otherwise.
*/
static bool tomoyo_path_matches_pattern2(const char *f, const char *p)
{
const char *f_delimiter;
const char *p_delimiter;
while (*f && *p) {
f_delimiter = strchr(f, '/');
if (!f_delimiter)
f_delimiter = f + strlen(f);
p_delimiter = strchr(p, '/');
if (!p_delimiter)
p_delimiter = p + strlen(p);
if (*p == '\\' && *(p + 1) == '{')
goto recursive;
if (!tomoyo_file_matches_pattern(f, f_delimiter, p,
p_delimiter))
return false;
f = f_delimiter;
if (*f)
f++;
p = p_delimiter;
if (*p)
p++;
}
/* Ignore trailing "\*" and "\@" in @pattern. */
while (*p == '\\' &&
(*(p + 1) == '*' || *(p + 1) == '@'))
p += 2;
return !*f && !*p;
recursive:
/*
* The "\{" pattern is permitted only after '/' character.
* This guarantees that below "*(p - 1)" is safe.
* Also, the "\}" pattern is permitted only before '/' character
* so that "\{" + "\}" pair will not break the "\-" operator.
*/
if (*(p - 1) != '/' || p_delimiter <= p + 3 || *p_delimiter != '/' ||
*(p_delimiter - 1) != '}' || *(p_delimiter - 2) != '\\')
return false; /* Bad pattern. */
do {
/* Compare current component with pattern. */
if (!tomoyo_file_matches_pattern(f, f_delimiter, p + 2,
p_delimiter - 2))
break;
/* Proceed to next component. */
f = f_delimiter;
if (!*f)
break;
f++;
/* Continue comparison. */
if (tomoyo_path_matches_pattern2(f, p_delimiter + 1))
return true;
f_delimiter = strchr(f, '/');
} while (f_delimiter);
return false; /* Not matched. */
}
/**
* tomoyo_path_matches_pattern - Check whether the given filename matches the given pattern.
*
* @filename: The filename to check.
* @pattern: The pattern to compare.
*
* Returns true if @filename matches @pattern, false otherwise.
*
* The following patterns are available.
* \\ \ itself.
* \ooo Octal representation of a byte.
* \* Zero or more repetitions of characters other than '/'.
* \@ Zero or more repetitions of characters other than '/' or '.'.
* \? 1 byte character other than '/'.
* \$ One or more repetitions of decimal digits.
* \+ 1 decimal digit.
* \X One or more repetitions of hexadecimal digits.
* \x 1 hexadecimal digit.
* \A One or more repetitions of alphabet characters.
* \a 1 alphabet character.
*
* \- Subtraction operator.
*
* /\{dir\}/ '/' + 'One or more repetitions of dir/' (e.g. /dir/ /dir/dir/
* /dir/dir/dir/ ).
*/
bool tomoyo_path_matches_pattern(const struct tomoyo_path_info *filename,
const struct tomoyo_path_info *pattern)
{
const char *f = filename->name;
const char *p = pattern->name;
const int len = pattern->const_len;
/* If @pattern does not contain a pattern, strcmp() can be used. */
if (!pattern->is_patterned)
return !tomoyo_pathcmp(filename, pattern);
/* Don't compare directory and non-directory. */
if (filename->is_dir != pattern->is_dir)
return false;
/* Compare the initial length without patterns. */
if (strncmp(f, p, len))
return false;
f += len;
p += len;
return tomoyo_path_matches_pattern2(f, p);
}
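/*
* For illustration (example paths only, using the patterns documented above):
* "/var/log/\*" matches "/var/log/messages" but not "/var/log/apt/term.log",
* because "\*" never crosses a '/' boundary.
* "/proc/\$/cmdline" matches "/proc/4096/cmdline".
* "/tmp/\{dir\}/leaf" matches "/tmp/dir/leaf" and "/tmp/dir/dir/leaf",
* but not "/tmp/leaf", because at least one "dir/" component is required.
*/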
/**
* tomoyo_get_exe - Get tomoyo_realpath() of current process.
*
* Returns the tomoyo_realpath() of current process on success, NULL otherwise.
*
* This function uses kzalloc(), so the caller must call kfree()
* if this function didn't return NULL.
*/
const char *tomoyo_get_exe(void)
{
struct file *exe_file;
const char *cp;
struct mm_struct *mm = current->mm;
if (!mm)
return NULL;
exe_file = get_mm_exe_file(mm);
if (!exe_file)
return NULL;
cp = tomoyo_realpath_from_path(&exe_file->f_path);
fput(exe_file);
return cp;
}
/**
* tomoyo_get_mode - Get MAC mode.
*
* @ns: Pointer to "struct tomoyo_policy_namespace".
* @profile: Profile number.
* @index: Index number of functionality.
*
* Returns mode.
*/
int tomoyo_get_mode(const struct tomoyo_policy_namespace *ns, const u8 profile,
const u8 index)
{
u8 mode;
struct tomoyo_profile *p;
if (!tomoyo_policy_loaded)
return TOMOYO_CONFIG_DISABLED;
p = tomoyo_profile(ns, profile);
mode = p->config[index];
if (mode == TOMOYO_CONFIG_USE_DEFAULT)
mode = p->config[tomoyo_index2category[index]
+ TOMOYO_MAX_MAC_INDEX];
if (mode == TOMOYO_CONFIG_USE_DEFAULT)
mode = p->default_config;
return mode & 3;
}
/**
* tomoyo_init_request_info - Initialize "struct tomoyo_request_info" members.
*
* @r: Pointer to "struct tomoyo_request_info" to initialize.
* @domain: Pointer to "struct tomoyo_domain_info". NULL for tomoyo_domain().
* @index: Index number of functionality.
*
* Returns mode.
*/
int tomoyo_init_request_info(struct tomoyo_request_info *r,
struct tomoyo_domain_info *domain, const u8 index)
{
u8 profile;
memset(r, 0, sizeof(*r));
if (!domain)
domain = tomoyo_domain();
r->domain = domain;
profile = domain->profile;
r->profile = profile;
r->type = index;
r->mode = tomoyo_get_mode(domain->ns, profile, index);
return r->mode;
}
/**
* tomoyo_domain_quota_is_ok - Check for domain's quota.
*
* @r: Pointer to "struct tomoyo_request_info".
*
* Returns true if the domain has not exceeded its quota, false otherwise.
*
* Caller holds tomoyo_read_lock().
*/
bool tomoyo_domain_quota_is_ok(struct tomoyo_request_info *r)
{
unsigned int count = 0;
struct tomoyo_domain_info *domain = r->domain;
struct tomoyo_acl_info *ptr;
if (r->mode != TOMOYO_CONFIG_LEARNING)
return false;
if (!domain)
return true;
if (READ_ONCE(domain->flags[TOMOYO_DIF_QUOTA_WARNED]))
return false;
list_for_each_entry_rcu(ptr, &domain->acl_info_list, list,
srcu_read_lock_held(&tomoyo_ss)) {
u16 perm;
if (ptr->is_deleted)
continue;
/*
* Reading perm bitmap might race with tomoyo_merge_*() because
* caller does not hold tomoyo_policy_lock mutex. But exceeding
* max_learning_entry parameter by a few entries does not harm.
*/
switch (ptr->type) {
case TOMOYO_TYPE_PATH_ACL:
perm = data_race(container_of(ptr, struct tomoyo_path_acl, head)->perm);
break;
case TOMOYO_TYPE_PATH2_ACL:
perm = data_race(container_of(ptr, struct tomoyo_path2_acl, head)->perm);
break;
case TOMOYO_TYPE_PATH_NUMBER_ACL:
perm = data_race(container_of(ptr, struct tomoyo_path_number_acl, head)
->perm);
break;
case TOMOYO_TYPE_MKDEV_ACL:
perm = data_race(container_of(ptr, struct tomoyo_mkdev_acl, head)->perm);
break;
case TOMOYO_TYPE_INET_ACL:
perm = data_race(container_of(ptr, struct tomoyo_inet_acl, head)->perm);
break;
case TOMOYO_TYPE_UNIX_ACL:
perm = data_race(container_of(ptr, struct tomoyo_unix_acl, head)->perm);
break;
case TOMOYO_TYPE_MANUAL_TASK_ACL:
perm = 0;
break;
default:
perm = 1;
}
count += hweight16(perm);
}
if (count < tomoyo_profile(domain->ns, domain->profile)->
pref[TOMOYO_PREF_MAX_LEARNING_ENTRY])
return true;
WRITE_ONCE(domain->flags[TOMOYO_DIF_QUOTA_WARNED], true);
/* r->granted = false; */
tomoyo_write_log(r, "%s", tomoyo_dif[TOMOYO_DIF_QUOTA_WARNED]);
#ifndef CONFIG_SECURITY_TOMOYO_INSECURE_BUILTIN_SETTING
pr_warn("WARNING: Domain '%s' has too many ACLs to hold. Stopped learning mode.\n",
domain->domainname->name);
#endif
return false;
}
| linux-master | security/tomoyo/util.c |