[PATCH v2 01/26] staging: most: rearrange source files

This patch renames and moves the source files of the driver. Additionally,
it adapts the ABI documentation as well as the Makefiles and Kconfig files
of the kernel's build system.

Signed-off-by: Christian Gromm <christian.gromm@xxxxxxxxxxxxx>
---
v2: fix patch numbering
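
The move also renames the Kconfig symbols that select the individual
components; the new names are visible in the Makefile hunk below. As a
minimal sketch, a .config fragment for building the core plus the cdev and
USB components as modules might look like this (that every component stays
tristate after the rename is assumed here):

  # old symbol            new symbol
  # CONFIG_MOSTCORE    -> CONFIG_MOST
  # CONFIG_AIM_CDEV    -> CONFIG_MOST_CDEV
  # CONFIG_AIM_NETWORK -> CONFIG_MOST_NET
  # CONFIG_AIM_SOUND   -> CONFIG_MOST_SOUND
  # CONFIG_AIM_V4L2    -> CONFIG_MOST_VIDEO
  # CONFIG_HDM_DIM2    -> CONFIG_MOST_DIM2
  # CONFIG_HDM_I2C     -> CONFIG_MOST_I2C
  # CONFIG_HDM_USB     -> CONFIG_MOST_USB
  CONFIG_MOST=m
  CONFIG_MOST_CDEV=m
  CONFIG_MOST_USB=m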

 .../most/Documentation/ABI/sysfs-bus-most.txt      |  313 ++++
 drivers/staging/most/Kconfig                       |   27 +-
 drivers/staging/most/Makefile                      |   19 +-
 drivers/staging/most/aim-cdev/Kconfig              |   12 -
 drivers/staging/most/aim-cdev/Makefile             |    4 -
 drivers/staging/most/aim-cdev/cdev.c               |  564 ------
 drivers/staging/most/aim-network/Kconfig           |   13 -
 drivers/staging/most/aim-network/Makefile          |    4 -
 drivers/staging/most/aim-network/networking.c      |  567 ------
 drivers/staging/most/aim-sound/Kconfig             |   13 -
 drivers/staging/most/aim-sound/Makefile            |    4 -
 drivers/staging/most/aim-sound/sound.c             |  765 --------
 drivers/staging/most/aim-v4l2/Kconfig              |   12 -
 drivers/staging/most/aim-v4l2/Makefile             |    5 -
 drivers/staging/most/aim-v4l2/video.c              |  616 -------
 drivers/staging/most/cdev/Kconfig                  |   12 +
 drivers/staging/most/cdev/Makefile                 |    4 +
 drivers/staging/most/cdev/cdev.c                   |  564 ++++++
 drivers/staging/most/core.c                        | 1949 ++++++++++++++++++++
 drivers/staging/most/core.h                        |  325 ++++
 drivers/staging/most/dim2/Kconfig                  |   16 +
 drivers/staging/most/dim2/Makefile                 |    4 +
 drivers/staging/most/dim2/dim2.c                   |  918 +++++++++
 drivers/staging/most/dim2/dim2.h                   |   27 +
 drivers/staging/most/dim2/errors.h                 |   57 +
 drivers/staging/most/dim2/hal.c                    |  985 ++++++++++
 drivers/staging/most/dim2/hal.h                    |  112 ++
 drivers/staging/most/dim2/reg.h                    |  163 ++
 drivers/staging/most/dim2/sysfs.c                  |  115 ++
 drivers/staging/most/dim2/sysfs.h                  |   36 +
 drivers/staging/most/hdm-dim2/Kconfig              |   16 -
 drivers/staging/most/hdm-dim2/Makefile             |    5 -
 drivers/staging/most/hdm-dim2/dim2_errors.h        |   57 -
 drivers/staging/most/hdm-dim2/dim2_hal.c           |  985 ----------
 drivers/staging/most/hdm-dim2/dim2_hal.h           |  112 --
 drivers/staging/most/hdm-dim2/dim2_hdm.c           |  918 ---------
 drivers/staging/most/hdm-dim2/dim2_hdm.h           |   27 -
 drivers/staging/most/hdm-dim2/dim2_reg.h           |  163 --
 drivers/staging/most/hdm-dim2/dim2_sysfs.c         |  115 --
 drivers/staging/most/hdm-dim2/dim2_sysfs.h         |   36 -
 drivers/staging/most/hdm-i2c/Kconfig               |   12 -
 drivers/staging/most/hdm-i2c/Makefile              |    3 -
 drivers/staging/most/hdm-i2c/hdm_i2c.c             |  423 -----
 drivers/staging/most/hdm-usb/Kconfig               |   14 -
 drivers/staging/most/hdm-usb/Makefile              |    4 -
 drivers/staging/most/hdm-usb/hdm_usb.c             | 1307 -------------
 drivers/staging/most/i2c/Kconfig                   |   12 +
 drivers/staging/most/i2c/Makefile                  |    4 +
 drivers/staging/most/i2c/i2c.c                     |  423 +++++
 drivers/staging/most/mostcore/Kconfig              |   14 -
 drivers/staging/most/mostcore/Makefile             |    3 -
 drivers/staging/most/mostcore/core.c               | 1949 --------------------
 drivers/staging/most/mostcore/mostcore.h           |  325 ----
 drivers/staging/most/net/Kconfig                   |   13 +
 drivers/staging/most/net/Makefile                  |    4 +
 drivers/staging/most/net/net.c                     |  567 ++++++
 drivers/staging/most/sound/Kconfig                 |   13 +
 drivers/staging/most/sound/Makefile                |    4 +
 drivers/staging/most/sound/sound.c                 |  765 ++++++++
 drivers/staging/most/usb/Kconfig                   |   13 +
 drivers/staging/most/usb/Makefile                  |    4 +
 drivers/staging/most/usb/usb.c                     | 1307 +++++++++++++
 drivers/staging/most/video/Kconfig                 |   12 +
 drivers/staging/most/video/Makefile                |    4 +
 drivers/staging/most/video/video.c                 |  616 +++++++
 65 files changed, 9387 insertions(+), 9087 deletions(-)
 create mode 100644 drivers/staging/most/Documentation/ABI/sysfs-bus-most.txt
 delete mode 100644 drivers/staging/most/aim-cdev/Kconfig
 delete mode 100644 drivers/staging/most/aim-cdev/Makefile
 delete mode 100644 drivers/staging/most/aim-cdev/cdev.c
 delete mode 100644 drivers/staging/most/aim-network/Kconfig
 delete mode 100644 drivers/staging/most/aim-network/Makefile
 delete mode 100644 drivers/staging/most/aim-network/networking.c
 delete mode 100644 drivers/staging/most/aim-sound/Kconfig
 delete mode 100644 drivers/staging/most/aim-sound/Makefile
 delete mode 100644 drivers/staging/most/aim-sound/sound.c
 delete mode 100644 drivers/staging/most/aim-v4l2/Kconfig
 delete mode 100644 drivers/staging/most/aim-v4l2/Makefile
 delete mode 100644 drivers/staging/most/aim-v4l2/video.c
 create mode 100644 drivers/staging/most/cdev/Kconfig
 create mode 100644 drivers/staging/most/cdev/Makefile
 create mode 100644 drivers/staging/most/cdev/cdev.c
 create mode 100644 drivers/staging/most/core.c
 create mode 100644 drivers/staging/most/core.h
 create mode 100644 drivers/staging/most/dim2/Kconfig
 create mode 100644 drivers/staging/most/dim2/Makefile
 create mode 100644 drivers/staging/most/dim2/dim2.c
 create mode 100644 drivers/staging/most/dim2/dim2.h
 create mode 100644 drivers/staging/most/dim2/errors.h
 create mode 100644 drivers/staging/most/dim2/hal.c
 create mode 100644 drivers/staging/most/dim2/hal.h
 create mode 100644 drivers/staging/most/dim2/reg.h
 create mode 100644 drivers/staging/most/dim2/sysfs.c
 create mode 100644 drivers/staging/most/dim2/sysfs.h
 delete mode 100644 drivers/staging/most/hdm-dim2/Kconfig
 delete mode 100644 drivers/staging/most/hdm-dim2/Makefile
 delete mode 100644 drivers/staging/most/hdm-dim2/dim2_errors.h
 delete mode 100644 drivers/staging/most/hdm-dim2/dim2_hal.c
 delete mode 100644 drivers/staging/most/hdm-dim2/dim2_hal.h
 delete mode 100644 drivers/staging/most/hdm-dim2/dim2_hdm.c
 delete mode 100644 drivers/staging/most/hdm-dim2/dim2_hdm.h
 delete mode 100644 drivers/staging/most/hdm-dim2/dim2_reg.h
 delete mode 100644 drivers/staging/most/hdm-dim2/dim2_sysfs.c
 delete mode 100644 drivers/staging/most/hdm-dim2/dim2_sysfs.h
 delete mode 100644 drivers/staging/most/hdm-i2c/Kconfig
 delete mode 100644 drivers/staging/most/hdm-i2c/Makefile
 delete mode 100644 drivers/staging/most/hdm-i2c/hdm_i2c.c
 delete mode 100644 drivers/staging/most/hdm-usb/Kconfig
 delete mode 100644 drivers/staging/most/hdm-usb/Makefile
 delete mode 100644 drivers/staging/most/hdm-usb/hdm_usb.c
 create mode 100644 drivers/staging/most/i2c/Kconfig
 create mode 100644 drivers/staging/most/i2c/Makefile
 create mode 100644 drivers/staging/most/i2c/i2c.c
 delete mode 100644 drivers/staging/most/mostcore/Kconfig
 delete mode 100644 drivers/staging/most/mostcore/Makefile
 delete mode 100644 drivers/staging/most/mostcore/core.c
 delete mode 100644 drivers/staging/most/mostcore/mostcore.h
 create mode 100644 drivers/staging/most/net/Kconfig
 create mode 100644 drivers/staging/most/net/Makefile
 create mode 100644 drivers/staging/most/net/net.c
 create mode 100644 drivers/staging/most/sound/Kconfig
 create mode 100644 drivers/staging/most/sound/Makefile
 create mode 100644 drivers/staging/most/sound/sound.c
 create mode 100644 drivers/staging/most/usb/Kconfig
 create mode 100644 drivers/staging/most/usb/Makefile
 create mode 100644 drivers/staging/most/usb/usb.c
 create mode 100644 drivers/staging/most/video/Kconfig
 create mode 100644 drivers/staging/most/video/Makefile
 create mode 100644 drivers/staging/most/video/video.c

diff --git a/drivers/staging/most/Documentation/ABI/sysfs-bus-most.txt b/drivers/staging/most/Documentation/ABI/sysfs-bus-most.txt
new file mode 100644
index 0000000..543884d
--- /dev/null
+++ b/drivers/staging/most/Documentation/ABI/sysfs-bus-most.txt
@@ -0,0 +1,313 @@
+What:		/sys/bus/most/devices/.../description
+Date:		March 2017
+KernelVersion:	4.15
+Contact:	Christian Gromm <christian.gromm@xxxxxxxxxxxxx>
+Description:
+		Provides information about the interface type and the physical
+		location of the device. Hardware attached via USB, for instance,
+		might return <usb_device 1-1.1:1.0>
+Users:
+
+What:		/sys/bus/most/devices/.../interface
+Date:		March 2017
+KernelVersion:	4.15
+Contact:	Christian Gromm <christian.gromm@xxxxxxxxxxxxx>
+Description:
+		Indicates the type of peripheral interface the device uses.
+Users:
+
+What:		/sys/bus/most/devices/.../dci
+Date:		June 2016
+KernelVersion:	4.15
+Contact:	Christian Gromm <christian.gromm@xxxxxxxxxxxxx>
+Description:
+		If the network interface controller is attached via USB, a dci
+		directory is created that allows applications to read and
+		write the controller's DCI registers.
+Users:
+
+What:		/sys/bus/most/devices/.../dci/arb_address
+Date:		June 2016
+KernelVersion:	4.15
+Contact:	Christian Gromm <christian.gromm@xxxxxxxxxxxxx>
+Description:
+		This is used to set an arbitrary DCI register address an
+		application wants to read from or write to.
+Users:
+
+What:		/sys/bus/most/devices/.../dci/arb_value
+Date:		June 2016
+KernelVersion:	4.15
+Contact:	Christian Gromm <christian.gromm@xxxxxxxxxxxxx>
+Description:
+		This is used to read and write the DCI register whose address
+		is stored in arb_address.
+Users:
+
+What:		/sys/bus/most/devices/.../dci/mep_eui48_hi
+Date:		June 2016
+KernelVersion:	4.15
+Contact:	Christian Gromm <christian.gromm@xxxxxxxxxxxxx>
+Description:
+		This is used to check and configure the MAC address.
+Users:
+
+What:		/sys/bus/most/devices/.../dci/mep_eui48_lo
+Date:		June 2016
+KernelVersion:	4.15
+Contact:	Christian Gromm <christian.gromm@xxxxxxxxxxxxx>
+Description:
+		This is used to check and configure the MAC address.
+Users:
+
+What:		/sys/bus/most/devices/.../dci/mep_eui48_mi
+Date:		June 2016
+KernelVersion:	4.15
+Contact:	Christian Gromm <christian.gromm@xxxxxxxxxxxxx>
+Description:
+		This is used to check and configure the MAC address.
+Users:
+
+What:		/sys/bus/most/devices/.../dci/mep_filter
+Date:		June 2016
+KernelVersion:	4.15
+Contact:	Christian Gromm <christian.gromm@xxxxxxxxxxxxx>
+Description:
+		This is used to check and configure the MEP filter address.
+Users:
+
+What:		/sys/bus/most/devices/.../dci/mep_hash0
+Date:		June 2016
+KernelVersion:	4.15
+Contact:	Christian Gromm <christian.gromm@xxxxxxxxxxxxx>
+Description:
+		This is used to check and configure the MEP hash table.
+Users:
+
+What:		/sys/bus/most/devices/.../dci/mep_hash1
+Date:		June 2016
+KernelVersion:	4.15
+Contact:	Christian Gromm <christian.gromm@xxxxxxxxxxxxx>
+Description:
+		This is used to check and configure the MEP hash table.
+Users:
+
+What:		/sys/bus/most/devices/.../dci/mep_hash2
+Date:		June 2016
+KernelVersion:	4.15
+Contact:	Christian Gromm <christian.gromm@xxxxxxxxxxxxx>
+Description:
+		This is used to check and configure the MEP hash table.
+Users:
+
+What:		/sys/bus/most/devices/.../dci/mep_hash3
+Date:		June 2016
+KernelVersion:	4.15
+Contact:	Christian Gromm <christian.gromm@xxxxxxxxxxxxx>
+Description:
+		This is used to check and configure the MEP hash table.
+Users:
+
+What:		/sys/bus/most/devices/.../dci/ni_state
+Date:		June 2016
+KernelVersion:	4.15
+Contact:	Christian Gromm <christian.gromm@xxxxxxxxxxxxx>
+Description:
+		Indicates the current network interface state.
+Users:
+
+What:		/sys/bus/most/devices/.../dci/node_address
+Date:		June 2016
+KernelVersion:	4.15
+Contact:	Christian Gromm <christian.gromm@xxxxxxxxxxxxx>
+Description:
+		Indicates the current node address.
+Users:
+
+What:		/sys/bus/most/devices/.../dci/node_position
+Date:		June 2016
+KernelVersion:	4.15
+Contact:	Christian Gromm <christian.gromm@xxxxxxxxxxxxx>
+Description:
+		Indicates the current node position.
+Users:
+
+What:		/sys/bus/most/devices/.../dci/packet_bandwidth
+Date:		June 2016
+KernelVersion:	4.15
+Contact:	Christian Gromm <christian.gromm@xxxxxxxxxxxxx>
+Description:
+		Indicates the configured packet bandwidth.
+Users:
+
+What:		/sys/bus/most/devices/.../dci/sync_ep
+Date:		June 2016
+KernelVersion:	4.15
+Contact:	Christian Gromm <christian.gromm@xxxxxxxxxxxxx>
+Description:
+		Triggers the controller's synchronization process for a certain
+		endpoint.
+Users:
+
+What:		/sys/bus/most/devices/.../<channel>/
+Date:		March 2017
+KernelVersion:	4.15
+Contact:	Christian Gromm <christian.gromm@xxxxxxxxxxxxx>
+Description:
+		For every channel of the device a directory is created, whose
+		name is dictated by the HDM. This enables an application to
+		collect information about the channel's capabilities and
+		configure it.
+Users:
+
+What:		/sys/bus/most/devices/.../<channel>/available_datatypes
+Date:		March 2017
+KernelVersion:	4.15
+Contact:	Christian Gromm <christian.gromm@xxxxxxxxxxxxx>
+Description:
+		Indicates the data types the current channel can transport.
+Users:
+
+What:		/sys/bus/most/devices/.../<channel>/available_directions
+Date:		March 2017
+KernelVersion:	4.15
+Contact:	Christian Gromm <christian.gromm@xxxxxxxxxxxxx>
+Description:
+		Indicates the directions the current channel is capable of.
+Users:
+
+What:		/sys/bus/most/devices/.../<channel>/number_of_packet_buffers
+Date:		March 2017
+KernelVersion:	4.15
+Contact:	Christian Gromm <christian.gromm@xxxxxxxxxxxxx>
+Description:
+		Indicates the number of packet buffers the current channel can
+		handle.
+Users:
+
+What:		/sys/bus/most/devices/.../<channel>/number_of_stream_buffers
+Date:		March 2017
+KernelVersion:	4.15
+Contact:	Christian Gromm <christian.gromm@xxxxxxxxxxxxx>
+Description:
+		Indicates the number of streaming buffers the current channel can
+		handle.
+Users:
+
+What:		/sys/bus/most/devices/.../<channel>/size_of_packet_buffer
+Date:		March 2017
+KernelVersion:	4.15
+Contact:	Christian Gromm <christian.gromm@xxxxxxxxxxxxx>
+Description:
+		Indicates the size of a packet buffer the current channel can
+		handle.
+Users:
+
+What:		/sys/bus/most/devices/.../<channel>/size_of_stream_buffer
+Date:		March 2017
+KernelVersion:	4.15
+Contact:	Christian Gromm <christian.gromm@xxxxxxxxxxxxx>
+Description:
+		Indicates the size of a streaming buffer the current channel can
+		handle.
+Users:
+
+What:		/sys/bus/most/devices/.../<channel>/set_number_of_buffers
+Date:		March 2017
+KernelVersion:	4.15
+Contact:	Christian Gromm <christian.gromm@xxxxxxxxxxxxx>
+Description:
+		This is to configure the number of buffers of the current channel.
+Users:
+
+What:		/sys/bus/most/devices/.../<channel>/set_buffer_size
+Date:		March 2017
+KernelVersion:	4.15
+Contact:	Christian Gromm <christian.gromm@xxxxxxxxxxxxx>
+Description:
+		This is to configure the size of a buffer of the current channel.
+Users:
+
+What:		/sys/bus/most/devices/.../<channel>/set_direction
+Date:		March 2017
+KernelVersion:	4.15
+Contact:	Christian Gromm <christian.gromm@xxxxxxxxxxxxx>
+Description:
+		This is to configure the direction of the current channel.
+		The following strings will be accepted:
+			'dir_tx',
+			'dir_rx'
+Users:
+
+What:		/sys/bus/most/devices/.../<channel>/set_datatype
+Date:		March 2017
+KernelVersion:	4.15
+Contact:	Christian Gromm <christian.gromm@xxxxxxxxxxxxx>
+Description:
+		This is to configure the data type of the current channel.
+		The following strings will be accepted:
+			'control',
+			'async',
+			'sync',
+			'isoc_avp'
+Users:
+
+What:		/sys/bus/most/devices/.../<channel>/set_subbuffer_size
+Date:		March 2017
+KernelVersion:	4.15
+Contact:	Christian Gromm <christian.gromm@xxxxxxxxxxxxx>
+Description:
+		This is to configure the subbuffer size of the current channel.
+Users:
+
+What:		/sys/bus/most/devices/.../<channel>/set_packets_per_xact
+Date:		March 2017
+KernelVersion:	4.15
+Contact:	Christian Gromm <christian.gromm@xxxxxxxxxxxxx>
+Description:
+		This is to configure the number of packets per transaction of
+		the current channel. This is only needed if the network
+		interface controller is attached via USB.
+Users:
+
+What:		/sys/bus/most/devices/.../<channel>/channel_starving
+Date:		March 2017
+KernelVersion:	4.15
+Contact:	Christian Gromm <christian.gromm@xxxxxxxxxxxxx>
+Description:
+		Indicates whether the current channel ran out of buffers.
+Users:
+
+What:		/sys/bus/most/drivers/mostcore/add_link
+Date:		March 2017
+KernelVersion:	4.15
+Contact:	Christian Gromm <christian.gromm@xxxxxxxxxxxxx>
+Description:
+		This is used to link a channel to a component of the
+		mostcore. A link created by writing to this file is
+		referred to as a pipe.
+Users:
+
+What:		/sys/bus/most/drivers/mostcore/remove_link
+Date:		March 2017
+KernelVersion:	4.15
+Contact:	Christian Gromm <christian.gromm@xxxxxxxxxxxxx>
+Description:
+		This is used to unlink a channel from a component.
+Users:
+
+What:		/sys/bus/most/drivers/mostcore/components
+Date:		March 2017
+KernelVersion:	4.15
+Contact:	Christian Gromm <christian.gromm@xxxxxxxxxxxxx>
+Description:
+		This is used to retrieve a list of registered components.
+Users:
+
+What:		/sys/bus/most/drivers/mostcore/links
+Date:		March 2017
+KernelVersion:	4.15
+Contact:	Christian Gromm <christian.gromm@xxxxxxxxxxxxx>
+Description:
+		This is used to retrieve a list of established links.
+Users:
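
As a rough illustration of the ABI above, a shell sketch of how an
application might configure and link a channel; the device name (mdev0) and
the channel directory (ep8f) are made-up examples, since the real names are
dictated by the HDM, and the record format expected by add_link is defined
by the core and not repeated here:

  # configure a hypothetical channel 'ep8f' of device 'mdev0' for async RX
  cd /sys/bus/most/devices/mdev0/ep8f
  echo 16     > set_number_of_buffers
  echo 1024   > set_buffer_size
  echo dir_rx > set_direction
  echo async  > set_datatype
  # link the configured channel to one of the registered components by
  # writing a link description to add_link, then inspect the result
  cat /sys/bus/most/drivers/mostcore/components
  cat /sys/bus/most/drivers/mostcore/links
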
diff --git a/drivers/staging/most/Kconfig b/drivers/staging/most/Kconfig
index 0b9b9b5..20047ab 100644
--- a/drivers/staging/most/Kconfig
+++ b/drivers/staging/most/Kconfig
@@ -1,10 +1,15 @@
 menuconfig MOST
-        tristate "MOST driver"
+        tristate "MOST support"
 	depends on HAS_DMA
-        select MOSTCORE
         default n
         ---help---
-          This option allows you to enable support for MOST Network transceivers.
+	  Say Y here if you want to enable MOST support.
+	  This driver needs at least one additional component to enable the
+	  desired access from userspace (e.g. character devices) and one that
+	  matches the network controller's hardware interface (e.g. USB).
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called most_core.
 
           If in doubt, say N here.
 
@@ -12,20 +17,18 @@ menuconfig MOST
 
 if MOST
 
-source "drivers/staging/most/mostcore/Kconfig"
-
-source "drivers/staging/most/aim-cdev/Kconfig"
+source "drivers/staging/most/cdev/Kconfig"
 
-source "drivers/staging/most/aim-network/Kconfig"
+source "drivers/staging/most/net/Kconfig"
 
-source "drivers/staging/most/aim-sound/Kconfig"
+source "drivers/staging/most/sound/Kconfig"
 
-source "drivers/staging/most/aim-v4l2/Kconfig"
+source "drivers/staging/most/video/Kconfig"
 
-source "drivers/staging/most/hdm-dim2/Kconfig"
+source "drivers/staging/most/dim2/Kconfig"
 
-source "drivers/staging/most/hdm-i2c/Kconfig"
+source "drivers/staging/most/i2c/Kconfig"
 
-source "drivers/staging/most/hdm-usb/Kconfig"
+source "drivers/staging/most/usb/Kconfig"
 
 endif
diff --git a/drivers/staging/most/Makefile b/drivers/staging/most/Makefile
index 9ee981c..2995309 100644
--- a/drivers/staging/most/Makefile
+++ b/drivers/staging/most/Makefile
@@ -1,8 +1,11 @@
-obj-$(CONFIG_MOSTCORE)	+= mostcore/
-obj-$(CONFIG_AIM_CDEV)	+= aim-cdev/
-obj-$(CONFIG_AIM_NETWORK)	+= aim-network/
-obj-$(CONFIG_AIM_SOUND)	+= aim-sound/
-obj-$(CONFIG_AIM_V4L2)	+= aim-v4l2/
-obj-$(CONFIG_HDM_DIM2)	+= hdm-dim2/
-obj-$(CONFIG_HDM_I2C)	+= hdm-i2c/
-obj-$(CONFIG_HDM_USB)	+= hdm-usb/
+obj-$(CONFIG_MOST) += most_core.o
+most_core-y := core.o
+ccflags-y += -Idrivers/staging/
+
+obj-$(CONFIG_MOST_CDEV)	+= cdev/
+obj-$(CONFIG_MOST_NET)	+= net/
+obj-$(CONFIG_MOST_SOUND)	+= sound/
+obj-$(CONFIG_MOST_VIDEO)	+= video/
+obj-$(CONFIG_MOST_DIM2)	+= dim2/
+obj-$(CONFIG_MOST_I2C)	+= i2c/
+obj-$(CONFIG_MOST_USB)	+= usb/
diff --git a/drivers/staging/most/aim-cdev/Kconfig b/drivers/staging/most/aim-cdev/Kconfig
deleted file mode 100644
index 3c59f1b..0000000
--- a/drivers/staging/most/aim-cdev/Kconfig
+++ /dev/null
@@ -1,12 +0,0 @@
-#
-# MOST Cdev configuration
-#
-
-config AIM_CDEV
-	tristate "Cdev AIM"
-
-	---help---
-	  Say Y here if you want to commumicate via character devices.
-
-	  To compile this driver as a module, choose M here: the
-	  module will be called aim_cdev.
\ No newline at end of file
diff --git a/drivers/staging/most/aim-cdev/Makefile b/drivers/staging/most/aim-cdev/Makefile
deleted file mode 100644
index 0bcc6c6..0000000
--- a/drivers/staging/most/aim-cdev/Makefile
+++ /dev/null
@@ -1,4 +0,0 @@
-obj-$(CONFIG_AIM_CDEV) += aim_cdev.o
-
-aim_cdev-objs := cdev.o
-ccflags-y += -Idrivers/staging/most/mostcore/
\ No newline at end of file
diff --git a/drivers/staging/most/aim-cdev/cdev.c b/drivers/staging/most/aim-cdev/cdev.c
deleted file mode 100644
index 1e5cbc8..0000000
--- a/drivers/staging/most/aim-cdev/cdev.c
+++ /dev/null
@@ -1,564 +0,0 @@
-/*
- * cdev.c - Application interfacing module for character devices
- *
- * Copyright (C) 2013-2015 Microchip Technology Germany II GmbH & Co. KG
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * This file is licensed under GPLv2.
- */
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-#include <linux/module.h>
-#include <linux/sched.h>
-#include <linux/fs.h>
-#include <linux/slab.h>
-#include <linux/device.h>
-#include <linux/cdev.h>
-#include <linux/poll.h>
-#include <linux/kfifo.h>
-#include <linux/uaccess.h>
-#include <linux/idr.h>
-#include "mostcore.h"
-
-static dev_t aim_devno;
-static struct class *aim_class;
-static struct ida minor_id;
-static unsigned int major;
-static struct most_aim cdev_aim;
-
-struct aim_channel {
-	wait_queue_head_t wq;
-	spinlock_t unlink;	/* synchronization lock to unlink channels */
-	struct cdev cdev;
-	struct device *dev;
-	struct mutex io_mutex;
-	struct most_interface *iface;
-	struct most_channel_config *cfg;
-	unsigned int channel_id;
-	dev_t devno;
-	size_t mbo_offs;
-	DECLARE_KFIFO_PTR(fifo, typeof(struct mbo *));
-	int access_ref;
-	struct list_head list;
-};
-
-#define to_channel(d) container_of(d, struct aim_channel, cdev)
-static struct list_head channel_list;
-static spinlock_t ch_list_lock;
-
-static inline bool ch_has_mbo(struct aim_channel *c)
-{
-	return channel_has_mbo(c->iface, c->channel_id, &cdev_aim) > 0;
-}
-
-static inline bool ch_get_mbo(struct aim_channel *c, struct mbo **mbo)
-{
-	if (!kfifo_peek(&c->fifo, mbo)) {
-		*mbo = most_get_mbo(c->iface, c->channel_id, &cdev_aim);
-		if (*mbo)
-			kfifo_in(&c->fifo, mbo, 1);
-	}
-	return *mbo;
-}
-
-static struct aim_channel *get_channel(struct most_interface *iface, int id)
-{
-	struct aim_channel *c, *tmp;
-	unsigned long flags;
-	int found_channel = 0;
-
-	spin_lock_irqsave(&ch_list_lock, flags);
-	list_for_each_entry_safe(c, tmp, &channel_list, list) {
-		if ((c->iface == iface) && (c->channel_id == id)) {
-			found_channel = 1;
-			break;
-		}
-	}
-	spin_unlock_irqrestore(&ch_list_lock, flags);
-	if (!found_channel)
-		return NULL;
-	return c;
-}
-
-static void stop_channel(struct aim_channel *c)
-{
-	struct mbo *mbo;
-
-	while (kfifo_out((struct kfifo *)&c->fifo, &mbo, 1))
-		most_put_mbo(mbo);
-	most_stop_channel(c->iface, c->channel_id, &cdev_aim);
-}
-
-static void destroy_cdev(struct aim_channel *c)
-{
-	unsigned long flags;
-
-	device_destroy(aim_class, c->devno);
-	cdev_del(&c->cdev);
-	spin_lock_irqsave(&ch_list_lock, flags);
-	list_del(&c->list);
-	spin_unlock_irqrestore(&ch_list_lock, flags);
-}
-
-static void destroy_channel(struct aim_channel *c)
-{
-	ida_simple_remove(&minor_id, MINOR(c->devno));
-	kfifo_free(&c->fifo);
-	kfree(c);
-}
-
-/**
- * aim_open - implements the syscall to open the device
- * @inode: inode pointer
- * @filp: file pointer
- *
- * This stores the channel pointer in the private data field of
- * the file structure and activates the channel within the core.
- */
-static int aim_open(struct inode *inode, struct file *filp)
-{
-	struct aim_channel *c;
-	int ret;
-
-	c = to_channel(inode->i_cdev);
-	filp->private_data = c;
-
-	if (((c->cfg->direction == MOST_CH_RX) &&
-	     ((filp->f_flags & O_ACCMODE) != O_RDONLY)) ||
-	     ((c->cfg->direction == MOST_CH_TX) &&
-		((filp->f_flags & O_ACCMODE) != O_WRONLY))) {
-		pr_info("WARN: Access flags mismatch\n");
-		return -EACCES;
-	}
-
-	mutex_lock(&c->io_mutex);
-	if (!c->dev) {
-		pr_info("WARN: Device is destroyed\n");
-		mutex_unlock(&c->io_mutex);
-		return -ENODEV;
-	}
-
-	if (c->access_ref) {
-		pr_info("WARN: Device is busy\n");
-		mutex_unlock(&c->io_mutex);
-		return -EBUSY;
-	}
-
-	c->mbo_offs = 0;
-	ret = most_start_channel(c->iface, c->channel_id, &cdev_aim);
-	if (!ret)
-		c->access_ref = 1;
-	mutex_unlock(&c->io_mutex);
-	return ret;
-}
-
-/**
- * aim_close - implements the syscall to close the device
- * @inode: inode pointer
- * @filp: file pointer
- *
- * This stops the channel within the core.
- */
-static int aim_close(struct inode *inode, struct file *filp)
-{
-	struct aim_channel *c = to_channel(inode->i_cdev);
-
-	mutex_lock(&c->io_mutex);
-	spin_lock(&c->unlink);
-	c->access_ref = 0;
-	spin_unlock(&c->unlink);
-	if (c->dev) {
-		stop_channel(c);
-		mutex_unlock(&c->io_mutex);
-	} else {
-		mutex_unlock(&c->io_mutex);
-		destroy_channel(c);
-	}
-	return 0;
-}
-
-/**
- * aim_write - implements the syscall to write to the device
- * @filp: file pointer
- * @buf: pointer to user buffer
- * @count: number of bytes to write
- * @offset: offset from where to start writing
- */
-static ssize_t aim_write(struct file *filp, const char __user *buf,
-			 size_t count, loff_t *offset)
-{
-	int ret;
-	size_t to_copy, left;
-	struct mbo *mbo = NULL;
-	struct aim_channel *c = filp->private_data;
-
-	mutex_lock(&c->io_mutex);
-	while (c->dev && !ch_get_mbo(c, &mbo)) {
-		mutex_unlock(&c->io_mutex);
-
-		if ((filp->f_flags & O_NONBLOCK))
-			return -EAGAIN;
-		if (wait_event_interruptible(c->wq, ch_has_mbo(c) || !c->dev))
-			return -ERESTARTSYS;
-		mutex_lock(&c->io_mutex);
-	}
-
-	if (unlikely(!c->dev)) {
-		ret = -ENODEV;
-		goto unlock;
-	}
-
-	to_copy = min(count, c->cfg->buffer_size - c->mbo_offs);
-	left = copy_from_user(mbo->virt_address + c->mbo_offs, buf, to_copy);
-	if (left == to_copy) {
-		ret = -EFAULT;
-		goto unlock;
-	}
-
-	c->mbo_offs += to_copy - left;
-	if (c->mbo_offs >= c->cfg->buffer_size ||
-	    c->cfg->data_type == MOST_CH_CONTROL ||
-	    c->cfg->data_type == MOST_CH_ASYNC) {
-		kfifo_skip(&c->fifo);
-		mbo->buffer_length = c->mbo_offs;
-		c->mbo_offs = 0;
-		most_submit_mbo(mbo);
-	}
-
-	ret = to_copy - left;
-unlock:
-	mutex_unlock(&c->io_mutex);
-	return ret;
-}
-
-/**
- * aim_read - implements the syscall to read from the device
- * @filp: file pointer
- * @buf: pointer to user buffer
- * @count: number of bytes to read
- * @offset: offset from where to start reading
- */
-static ssize_t
-aim_read(struct file *filp, char __user *buf, size_t count, loff_t *offset)
-{
-	size_t to_copy, not_copied, copied;
-	struct mbo *mbo;
-	struct aim_channel *c = filp->private_data;
-
-	mutex_lock(&c->io_mutex);
-	while (c->dev && !kfifo_peek(&c->fifo, &mbo)) {
-		mutex_unlock(&c->io_mutex);
-		if (filp->f_flags & O_NONBLOCK)
-			return -EAGAIN;
-		if (wait_event_interruptible(c->wq,
-					     (!kfifo_is_empty(&c->fifo) ||
-					      (!c->dev))))
-			return -ERESTARTSYS;
-		mutex_lock(&c->io_mutex);
-	}
-
-	/* make sure we don't submit to gone devices */
-	if (unlikely(!c->dev)) {
-		mutex_unlock(&c->io_mutex);
-		return -ENODEV;
-	}
-
-	to_copy = min_t(size_t,
-			count,
-			mbo->processed_length - c->mbo_offs);
-
-	not_copied = copy_to_user(buf,
-				  mbo->virt_address + c->mbo_offs,
-				  to_copy);
-
-	copied = to_copy - not_copied;
-
-	c->mbo_offs += copied;
-	if (c->mbo_offs >= mbo->processed_length) {
-		kfifo_skip(&c->fifo);
-		most_put_mbo(mbo);
-		c->mbo_offs = 0;
-	}
-	mutex_unlock(&c->io_mutex);
-	return copied;
-}
-
-static unsigned int aim_poll(struct file *filp, poll_table *wait)
-{
-	struct aim_channel *c = filp->private_data;
-	unsigned int mask = 0;
-
-	poll_wait(filp, &c->wq, wait);
-
-	if (c->cfg->direction == MOST_CH_RX) {
-		if (!kfifo_is_empty(&c->fifo))
-			mask |= POLLIN | POLLRDNORM;
-	} else {
-		if (!kfifo_is_empty(&c->fifo) || ch_has_mbo(c))
-			mask |= POLLOUT | POLLWRNORM;
-	}
-	return mask;
-}
-
-/**
- * Initialization of struct file_operations
- */
-static const struct file_operations channel_fops = {
-	.owner = THIS_MODULE,
-	.read = aim_read,
-	.write = aim_write,
-	.open = aim_open,
-	.release = aim_close,
-	.poll = aim_poll,
-};
-
-/**
- * aim_disconnect_channel - disconnect a channel
- * @iface: pointer to interface instance
- * @channel_id: channel index
- *
- * This frees allocated memory and removes the cdev that represents this
- * channel in user space.
- */
-static int aim_disconnect_channel(struct most_interface *iface, int channel_id)
-{
-	struct aim_channel *c;
-
-	if (!iface) {
-		pr_info("Bad interface pointer\n");
-		return -EINVAL;
-	}
-
-	c = get_channel(iface, channel_id);
-	if (!c)
-		return -ENXIO;
-
-	mutex_lock(&c->io_mutex);
-	spin_lock(&c->unlink);
-	c->dev = NULL;
-	spin_unlock(&c->unlink);
-	destroy_cdev(c);
-	if (c->access_ref) {
-		stop_channel(c);
-		wake_up_interruptible(&c->wq);
-		mutex_unlock(&c->io_mutex);
-	} else {
-		mutex_unlock(&c->io_mutex);
-		destroy_channel(c);
-	}
-	return 0;
-}
-
-/**
- * aim_rx_completion - completion handler for rx channels
- * @mbo: pointer to buffer object that has completed
- *
- * This searches for the channel linked to this MBO and stores it in the local
- * fifo buffer.
- */
-static int aim_rx_completion(struct mbo *mbo)
-{
-	struct aim_channel *c;
-
-	if (!mbo)
-		return -EINVAL;
-
-	c = get_channel(mbo->ifp, mbo->hdm_channel_id);
-	if (!c)
-		return -ENXIO;
-
-	spin_lock(&c->unlink);
-	if (!c->access_ref || !c->dev) {
-		spin_unlock(&c->unlink);
-		return -ENODEV;
-	}
-	kfifo_in(&c->fifo, &mbo, 1);
-	spin_unlock(&c->unlink);
-#ifdef DEBUG_MESG
-	if (kfifo_is_full(&c->fifo))
-		pr_info("WARN: Fifo is full\n");
-#endif
-	wake_up_interruptible(&c->wq);
-	return 0;
-}
-
-/**
- * aim_tx_completion - completion handler for tx channels
- * @iface: pointer to interface instance
- * @channel_id: channel index/ID
- *
- * This wakes sleeping processes in the wait-queue.
- */
-static int aim_tx_completion(struct most_interface *iface, int channel_id)
-{
-	struct aim_channel *c;
-
-	if (!iface) {
-		pr_info("Bad interface pointer\n");
-		return -EINVAL;
-	}
-	if ((channel_id < 0) || (channel_id >= iface->num_channels)) {
-		pr_info("Channel ID out of range\n");
-		return -EINVAL;
-	}
-
-	c = get_channel(iface, channel_id);
-	if (!c)
-		return -ENXIO;
-	wake_up_interruptible(&c->wq);
-	return 0;
-}
-
-/**
- * aim_probe - probe function of the driver module
- * @iface: pointer to interface instance
- * @channel_id: channel index/ID
- * @cfg: pointer to actual channel configuration
- * @parent: pointer to kobject (needed for sysfs hook-up)
- * @name: name of the device to be created
- *
- * This allocates achannel object and creates the device node in /dev
- *
- * Returns 0 on success or error code otherwise.
- */
-static int aim_probe(struct most_interface *iface, int channel_id,
-		     struct most_channel_config *cfg,
-		     struct kobject *parent, char *name)
-{
-	struct aim_channel *c;
-	unsigned long cl_flags;
-	int retval;
-	int current_minor;
-
-	if ((!iface) || (!cfg) || (!parent) || (!name)) {
-		pr_info("Probing AIM with bad arguments");
-		return -EINVAL;
-	}
-	c = get_channel(iface, channel_id);
-	if (c)
-		return -EEXIST;
-
-	current_minor = ida_simple_get(&minor_id, 0, 0, GFP_KERNEL);
-	if (current_minor < 0)
-		return current_minor;
-
-	c = kzalloc(sizeof(*c), GFP_KERNEL);
-	if (!c) {
-		retval = -ENOMEM;
-		goto error_alloc_channel;
-	}
-
-	c->devno = MKDEV(major, current_minor);
-	cdev_init(&c->cdev, &channel_fops);
-	c->cdev.owner = THIS_MODULE;
-	cdev_add(&c->cdev, c->devno, 1);
-	c->iface = iface;
-	c->cfg = cfg;
-	c->channel_id = channel_id;
-	c->access_ref = 0;
-	spin_lock_init(&c->unlink);
-	INIT_KFIFO(c->fifo);
-	retval = kfifo_alloc(&c->fifo, cfg->num_buffers, GFP_KERNEL);
-	if (retval) {
-		pr_info("failed to alloc channel kfifo");
-		goto error_alloc_kfifo;
-	}
-	init_waitqueue_head(&c->wq);
-	mutex_init(&c->io_mutex);
-	spin_lock_irqsave(&ch_list_lock, cl_flags);
-	list_add_tail(&c->list, &channel_list);
-	spin_unlock_irqrestore(&ch_list_lock, cl_flags);
-	c->dev = device_create(aim_class,
-				     NULL,
-				     c->devno,
-				     NULL,
-				     "%s", name);
-
-	if (IS_ERR(c->dev)) {
-		retval = PTR_ERR(c->dev);
-		pr_info("failed to create new device node %s\n", name);
-		goto error_create_device;
-	}
-	kobject_uevent(&c->dev->kobj, KOBJ_ADD);
-	return 0;
-
-error_create_device:
-	kfifo_free(&c->fifo);
-	list_del(&c->list);
-error_alloc_kfifo:
-	cdev_del(&c->cdev);
-	kfree(c);
-error_alloc_channel:
-	ida_simple_remove(&minor_id, current_minor);
-	return retval;
-}
-
-static struct most_aim cdev_aim = {
-	.name = "cdev",
-	.probe_channel = aim_probe,
-	.disconnect_channel = aim_disconnect_channel,
-	.rx_completion = aim_rx_completion,
-	.tx_completion = aim_tx_completion,
-};
-
-static int __init mod_init(void)
-{
-	int err;
-
-	pr_info("init()\n");
-
-	INIT_LIST_HEAD(&channel_list);
-	spin_lock_init(&ch_list_lock);
-	ida_init(&minor_id);
-
-	err = alloc_chrdev_region(&aim_devno, 0, 50, "cdev");
-	if (err < 0)
-		goto dest_ida;
-	major = MAJOR(aim_devno);
-
-	aim_class = class_create(THIS_MODULE, "most_cdev_aim");
-	if (IS_ERR(aim_class)) {
-		pr_err("no udev support\n");
-		err = PTR_ERR(aim_class);
-		goto free_cdev;
-	}
-	err = most_register_aim(&cdev_aim);
-	if (err)
-		goto dest_class;
-	return 0;
-
-dest_class:
-	class_destroy(aim_class);
-free_cdev:
-	unregister_chrdev_region(aim_devno, 1);
-dest_ida:
-	ida_destroy(&minor_id);
-	return err;
-}
-
-static void __exit mod_exit(void)
-{
-	struct aim_channel *c, *tmp;
-
-	pr_info("exit module\n");
-
-	most_deregister_aim(&cdev_aim);
-
-	list_for_each_entry_safe(c, tmp, &channel_list, list) {
-		destroy_cdev(c);
-		destroy_channel(c);
-	}
-	class_destroy(aim_class);
-	unregister_chrdev_region(aim_devno, 1);
-	ida_destroy(&minor_id);
-}
-
-module_init(mod_init);
-module_exit(mod_exit);
-MODULE_AUTHOR("Christian Gromm <christian.gromm@xxxxxxxxxxxxx>");
-MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("character device AIM for mostcore");
diff --git a/drivers/staging/most/aim-network/Kconfig b/drivers/staging/most/aim-network/Kconfig
deleted file mode 100644
index 4c66b24..0000000
--- a/drivers/staging/most/aim-network/Kconfig
+++ /dev/null
@@ -1,13 +0,0 @@
-#
-# MOST Networking configuration
-#
-
-config AIM_NETWORK
-	tristate "Networking AIM"
-	depends on NET
-
-	---help---
-	  Say Y here if you want to commumicate via a networking device.
-
-	  To compile this driver as a module, choose M here: the
-	  module will be called aim_network.
diff --git a/drivers/staging/most/aim-network/Makefile b/drivers/staging/most/aim-network/Makefile
deleted file mode 100644
index 840c1dd..0000000
--- a/drivers/staging/most/aim-network/Makefile
+++ /dev/null
@@ -1,4 +0,0 @@
-obj-$(CONFIG_AIM_NETWORK) += aim_network.o
-
-aim_network-objs := networking.o
-ccflags-y += -Idrivers/staging/most/mostcore/
diff --git a/drivers/staging/most/aim-network/networking.c b/drivers/staging/most/aim-network/networking.c
deleted file mode 100644
index 936f013..0000000
--- a/drivers/staging/most/aim-network/networking.c
+++ /dev/null
@@ -1,567 +0,0 @@
-/*
- * Networking AIM - Networking Application Interface Module for MostCore
- *
- * Copyright (C) 2015, Microchip Technology Germany II GmbH & Co. KG
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * This file is licensed under GPLv2.
- */
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/module.h>
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/slab.h>
-#include <linux/init.h>
-#include <linux/list.h>
-#include <linux/wait.h>
-#include <linux/kobject.h>
-#include "mostcore.h"
-
-#define MEP_HDR_LEN 8
-#define MDP_HDR_LEN 16
-#define MAMAC_DATA_LEN (1024 - MDP_HDR_LEN)
-
-#define PMHL 5
-
-#define PMS_TELID_UNSEGM_MAMAC	0x0A
-#define PMS_FIFONO_MDP		0x01
-#define PMS_FIFONO_MEP		0x04
-#define PMS_MSGTYPE_DATA	0x04
-#define PMS_DEF_PRIO		0
-#define MEP_DEF_RETRY		15
-
-#define PMS_FIFONO_MASK		0x07
-#define PMS_FIFONO_SHIFT	3
-#define PMS_RETRY_SHIFT		4
-#define PMS_TELID_MASK		0x0F
-#define PMS_TELID_SHIFT		4
-
-#define HB(value)		((u8)((u16)(value) >> 8))
-#define LB(value)		((u8)(value))
-
-#define EXTRACT_BIT_SET(bitset_name, value) \
-	(((value) >> bitset_name##_SHIFT) & bitset_name##_MASK)
-
-#define PMS_IS_MEP(buf, len) \
-	((len) > MEP_HDR_LEN && \
-	 EXTRACT_BIT_SET(PMS_FIFONO, (buf)[3]) == PMS_FIFONO_MEP)
-
-#define PMS_IS_MAMAC(buf, len) \
-	((len) > MDP_HDR_LEN && \
-	 EXTRACT_BIT_SET(PMS_FIFONO, (buf)[3]) == PMS_FIFONO_MDP && \
-	 EXTRACT_BIT_SET(PMS_TELID, (buf)[14]) == PMS_TELID_UNSEGM_MAMAC)
-
-struct net_dev_channel {
-	bool linked;
-	int ch_id;
-};
-
-struct net_dev_context {
-	struct most_interface *iface;
-	bool is_mamac;
-	struct net_device *dev;
-	struct net_dev_channel rx;
-	struct net_dev_channel tx;
-	struct list_head list;
-};
-
-static struct list_head net_devices = LIST_HEAD_INIT(net_devices);
-static struct mutex probe_disc_mt; /* ch->linked = true, most_nd_open */
-static struct spinlock list_lock; /* list_head, ch->linked = false, dev_hold */
-static struct most_aim aim;
-
-static int skb_to_mamac(const struct sk_buff *skb, struct mbo *mbo)
-{
-	u8 *buff = mbo->virt_address;
-	const u8 broadcast[] = { 0x03, 0xFF };
-	const u8 *dest_addr = skb->data + 4;
-	const u8 *eth_type = skb->data + 12;
-	unsigned int payload_len = skb->len - ETH_HLEN;
-	unsigned int mdp_len = payload_len + MDP_HDR_LEN;
-
-	if (mbo->buffer_length < mdp_len) {
-		pr_err("drop: too small buffer! (%d for %d)\n",
-		       mbo->buffer_length, mdp_len);
-		return -EINVAL;
-	}
-
-	if (skb->len < ETH_HLEN) {
-		pr_err("drop: too small packet! (%d)\n", skb->len);
-		return -EINVAL;
-	}
-
-	if (dest_addr[0] == 0xFF && dest_addr[1] == 0xFF)
-		dest_addr = broadcast;
-
-	*buff++ = HB(mdp_len - 2);
-	*buff++ = LB(mdp_len - 2);
-
-	*buff++ = PMHL;
-	*buff++ = (PMS_FIFONO_MDP << PMS_FIFONO_SHIFT) | PMS_MSGTYPE_DATA;
-	*buff++ = PMS_DEF_PRIO;
-	*buff++ = dest_addr[0];
-	*buff++ = dest_addr[1];
-	*buff++ = 0x00;
-
-	*buff++ = HB(payload_len + 6);
-	*buff++ = LB(payload_len + 6);
-
-	/* end of FPH here */
-
-	*buff++ = eth_type[0];
-	*buff++ = eth_type[1];
-	*buff++ = 0;
-	*buff++ = 0;
-
-	*buff++ = PMS_TELID_UNSEGM_MAMAC << 4 | HB(payload_len);
-	*buff++ = LB(payload_len);
-
-	memcpy(buff, skb->data + ETH_HLEN, payload_len);
-	mbo->buffer_length = mdp_len;
-	return 0;
-}
-
-static int skb_to_mep(const struct sk_buff *skb, struct mbo *mbo)
-{
-	u8 *buff = mbo->virt_address;
-	unsigned int mep_len = skb->len + MEP_HDR_LEN;
-
-	if (mbo->buffer_length < mep_len) {
-		pr_err("drop: too small buffer! (%d for %d)\n",
-		       mbo->buffer_length, mep_len);
-		return -EINVAL;
-	}
-
-	*buff++ = HB(mep_len - 2);
-	*buff++ = LB(mep_len - 2);
-
-	*buff++ = PMHL;
-	*buff++ = (PMS_FIFONO_MEP << PMS_FIFONO_SHIFT) | PMS_MSGTYPE_DATA;
-	*buff++ = (MEP_DEF_RETRY << PMS_RETRY_SHIFT) | PMS_DEF_PRIO;
-	*buff++ = 0;
-	*buff++ = 0;
-	*buff++ = 0;
-
-	memcpy(buff, skb->data, skb->len);
-	mbo->buffer_length = mep_len;
-	return 0;
-}
-
-static int most_nd_set_mac_address(struct net_device *dev, void *p)
-{
-	struct net_dev_context *nd = netdev_priv(dev);
-	int err = eth_mac_addr(dev, p);
-
-	if (err)
-		return err;
-
-	nd->is_mamac =
-		(dev->dev_addr[0] == 0 && dev->dev_addr[1] == 0 &&
-		 dev->dev_addr[2] == 0 && dev->dev_addr[3] == 0);
-
-	/*
-	 * Set default MTU for the given packet type.
-	 * It is still possible to change MTU using ip tools afterwards.
-	 */
-	dev->mtu = nd->is_mamac ? MAMAC_DATA_LEN : ETH_DATA_LEN;
-
-	return 0;
-}
-
-static void on_netinfo(struct most_interface *iface,
-		       unsigned char link_stat, unsigned char *mac_addr);
-
-static int most_nd_open(struct net_device *dev)
-{
-	struct net_dev_context *nd = netdev_priv(dev);
-	int ret = 0;
-
-	mutex_lock(&probe_disc_mt);
-
-	if (most_start_channel(nd->iface, nd->rx.ch_id, &aim)) {
-		netdev_err(dev, "most_start_channel() failed\n");
-		ret = -EBUSY;
-		goto unlock;
-	}
-
-	if (most_start_channel(nd->iface, nd->tx.ch_id, &aim)) {
-		netdev_err(dev, "most_start_channel() failed\n");
-		most_stop_channel(nd->iface, nd->rx.ch_id, &aim);
-		ret = -EBUSY;
-		goto unlock;
-	}
-
-	netif_carrier_off(dev);
-	if (is_valid_ether_addr(dev->dev_addr))
-		netif_dormant_off(dev);
-	else
-		netif_dormant_on(dev);
-	netif_wake_queue(dev);
-	if (nd->iface->request_netinfo)
-		nd->iface->request_netinfo(nd->iface, nd->tx.ch_id, on_netinfo);
-
-unlock:
-	mutex_unlock(&probe_disc_mt);
-	return ret;
-}
-
-static int most_nd_stop(struct net_device *dev)
-{
-	struct net_dev_context *nd = netdev_priv(dev);
-
-	netif_stop_queue(dev);
-	if (nd->iface->request_netinfo)
-		nd->iface->request_netinfo(nd->iface, nd->tx.ch_id, NULL);
-	most_stop_channel(nd->iface, nd->rx.ch_id, &aim);
-	most_stop_channel(nd->iface, nd->tx.ch_id, &aim);
-
-	return 0;
-}
-
-static netdev_tx_t most_nd_start_xmit(struct sk_buff *skb,
-				      struct net_device *dev)
-{
-	struct net_dev_context *nd = netdev_priv(dev);
-	struct mbo *mbo;
-	int ret;
-
-	mbo = most_get_mbo(nd->iface, nd->tx.ch_id, &aim);
-
-	if (!mbo) {
-		netif_stop_queue(dev);
-		dev->stats.tx_fifo_errors++;
-		return NETDEV_TX_BUSY;
-	}
-
-	if (nd->is_mamac)
-		ret = skb_to_mamac(skb, mbo);
-	else
-		ret = skb_to_mep(skb, mbo);
-
-	if (ret) {
-		most_put_mbo(mbo);
-		dev->stats.tx_dropped++;
-		kfree_skb(skb);
-		return NETDEV_TX_OK;
-	}
-
-	most_submit_mbo(mbo);
-	dev->stats.tx_packets++;
-	dev->stats.tx_bytes += skb->len;
-	kfree_skb(skb);
-	return NETDEV_TX_OK;
-}
-
-static const struct net_device_ops most_nd_ops = {
-	.ndo_open = most_nd_open,
-	.ndo_stop = most_nd_stop,
-	.ndo_start_xmit = most_nd_start_xmit,
-	.ndo_set_mac_address = most_nd_set_mac_address,
-};
-
-static void most_nd_setup(struct net_device *dev)
-{
-	ether_setup(dev);
-	dev->netdev_ops = &most_nd_ops;
-}
-
-static struct net_dev_context *get_net_dev(struct most_interface *iface)
-{
-	struct net_dev_context *nd;
-
-	list_for_each_entry(nd, &net_devices, list)
-		if (nd->iface == iface)
-			return nd;
-	return NULL;
-}
-
-static struct net_dev_context *get_net_dev_hold(struct most_interface *iface)
-{
-	struct net_dev_context *nd;
-	unsigned long flags;
-
-	spin_lock_irqsave(&list_lock, flags);
-	nd = get_net_dev(iface);
-	if (nd && nd->rx.linked && nd->tx.linked)
-		dev_hold(nd->dev);
-	else
-		nd = NULL;
-	spin_unlock_irqrestore(&list_lock, flags);
-	return nd;
-}
-
-static int aim_probe_channel(struct most_interface *iface, int channel_idx,
-			     struct most_channel_config *ccfg,
-			     struct kobject *parent, char *name)
-{
-	struct net_dev_context *nd;
-	struct net_dev_channel *ch;
-	struct net_device *dev;
-	unsigned long flags;
-	int ret = 0;
-
-	if (!iface)
-		return -EINVAL;
-
-	if (ccfg->data_type != MOST_CH_ASYNC)
-		return -EINVAL;
-
-	mutex_lock(&probe_disc_mt);
-	nd = get_net_dev(iface);
-	if (!nd) {
-		dev = alloc_netdev(sizeof(struct net_dev_context), "meth%d",
-				   NET_NAME_UNKNOWN, most_nd_setup);
-		if (!dev) {
-			ret = -ENOMEM;
-			goto unlock;
-		}
-
-		nd = netdev_priv(dev);
-		nd->iface = iface;
-		nd->dev = dev;
-
-		spin_lock_irqsave(&list_lock, flags);
-		list_add(&nd->list, &net_devices);
-		spin_unlock_irqrestore(&list_lock, flags);
-
-		ch = ccfg->direction == MOST_CH_TX ? &nd->tx : &nd->rx;
-	} else {
-		ch = ccfg->direction == MOST_CH_TX ? &nd->tx : &nd->rx;
-		if (ch->linked) {
-			pr_err("direction is allocated\n");
-			ret = -EINVAL;
-			goto unlock;
-		}
-
-		if (register_netdev(nd->dev)) {
-			pr_err("register_netdev() failed\n");
-			ret = -EINVAL;
-			goto unlock;
-		}
-	}
-	ch->ch_id = channel_idx;
-	ch->linked = true;
-
-unlock:
-	mutex_unlock(&probe_disc_mt);
-	return ret;
-}
-
-static int aim_disconnect_channel(struct most_interface *iface,
-				  int channel_idx)
-{
-	struct net_dev_context *nd;
-	struct net_dev_channel *ch;
-	unsigned long flags;
-	int ret = 0;
-
-	mutex_lock(&probe_disc_mt);
-	nd = get_net_dev(iface);
-	if (!nd) {
-		ret = -EINVAL;
-		goto unlock;
-	}
-
-	if (nd->rx.linked && channel_idx == nd->rx.ch_id) {
-		ch = &nd->rx;
-	} else if (nd->tx.linked && channel_idx == nd->tx.ch_id) {
-		ch = &nd->tx;
-	} else {
-		ret = -EINVAL;
-		goto unlock;
-	}
-
-	if (nd->rx.linked && nd->tx.linked) {
-		spin_lock_irqsave(&list_lock, flags);
-		ch->linked = false;
-		spin_unlock_irqrestore(&list_lock, flags);
-
-		/*
-		 * do not call most_stop_channel() here, because channels are
-		 * going to be closed in ndo_stop() after unregister_netdev()
-		 */
-		unregister_netdev(nd->dev);
-	} else {
-		spin_lock_irqsave(&list_lock, flags);
-		list_del(&nd->list);
-		spin_unlock_irqrestore(&list_lock, flags);
-
-		free_netdev(nd->dev);
-	}
-
-unlock:
-	mutex_unlock(&probe_disc_mt);
-	return ret;
-}
-
-static int aim_resume_tx_channel(struct most_interface *iface,
-				 int channel_idx)
-{
-	struct net_dev_context *nd;
-
-	nd = get_net_dev_hold(iface);
-	if (!nd)
-		return 0;
-
-	if (nd->tx.ch_id != channel_idx)
-		goto put_nd;
-
-	netif_wake_queue(nd->dev);
-
-put_nd:
-	dev_put(nd->dev);
-	return 0;
-}
-
-static int aim_rx_data(struct mbo *mbo)
-{
-	const u32 zero = 0;
-	struct net_dev_context *nd;
-	char *buf = mbo->virt_address;
-	u32 len = mbo->processed_length;
-	struct sk_buff *skb;
-	struct net_device *dev;
-	unsigned int skb_len;
-	int ret = 0;
-
-	nd = get_net_dev_hold(mbo->ifp);
-	if (!nd)
-		return -EIO;
-
-	if (nd->rx.ch_id != mbo->hdm_channel_id) {
-		ret = -EIO;
-		goto put_nd;
-	}
-
-	dev = nd->dev;
-
-	if (nd->is_mamac) {
-		if (!PMS_IS_MAMAC(buf, len)) {
-			ret = -EIO;
-			goto put_nd;
-		}
-
-		skb = dev_alloc_skb(len - MDP_HDR_LEN + 2 * ETH_ALEN + 2);
-	} else {
-		if (!PMS_IS_MEP(buf, len)) {
-			ret = -EIO;
-			goto put_nd;
-		}
-
-		skb = dev_alloc_skb(len - MEP_HDR_LEN);
-	}
-
-	if (!skb) {
-		dev->stats.rx_dropped++;
-		pr_err_once("drop packet: no memory for skb\n");
-		goto out;
-	}
-
-	skb->dev = dev;
-
-	if (nd->is_mamac) {
-		/* dest */
-		ether_addr_copy(skb_put(skb, ETH_ALEN), dev->dev_addr);
-
-		/* src */
-		skb_put_data(skb, &zero, 4);
-		skb_put_data(skb, buf + 5, 2);
-
-		/* eth type */
-		skb_put_data(skb, buf + 10, 2);
-
-		buf += MDP_HDR_LEN;
-		len -= MDP_HDR_LEN;
-	} else {
-		buf += MEP_HDR_LEN;
-		len -= MEP_HDR_LEN;
-	}
-
-	skb_put_data(skb, buf, len);
-	skb->protocol = eth_type_trans(skb, dev);
-	skb_len = skb->len;
-	if (netif_rx(skb) == NET_RX_SUCCESS) {
-		dev->stats.rx_packets++;
-		dev->stats.rx_bytes += skb_len;
-	} else {
-		dev->stats.rx_dropped++;
-	}
-
-out:
-	most_put_mbo(mbo);
-
-put_nd:
-	dev_put(nd->dev);
-	return ret;
-}
-
-static struct most_aim aim = {
-	.name = "networking",
-	.probe_channel = aim_probe_channel,
-	.disconnect_channel = aim_disconnect_channel,
-	.tx_completion = aim_resume_tx_channel,
-	.rx_completion = aim_rx_data,
-};
-
-static int __init most_net_init(void)
-{
-	spin_lock_init(&list_lock);
-	mutex_init(&probe_disc_mt);
-	return most_register_aim(&aim);
-}
-
-static void __exit most_net_exit(void)
-{
-	most_deregister_aim(&aim);
-}
-
-/**
- * on_netinfo - callback for HDM to be informed about HW's MAC
- * @param iface - most interface instance
- * @param link_stat - link status
- * @param mac_addr - MAC address
- */
-static void on_netinfo(struct most_interface *iface,
-		       unsigned char link_stat, unsigned char *mac_addr)
-{
-	struct net_dev_context *nd;
-	struct net_device *dev;
-	const u8 *m = mac_addr;
-
-	nd = get_net_dev_hold(iface);
-	if (!nd)
-		return;
-
-	dev = nd->dev;
-
-	if (link_stat)
-		netif_carrier_on(dev);
-	else
-		netif_carrier_off(dev);
-
-	if (m && is_valid_ether_addr(m)) {
-		if (!is_valid_ether_addr(dev->dev_addr)) {
-			netdev_info(dev, "set mac %02x-%02x-%02x-%02x-%02x-%02x\n",
-				    m[0], m[1], m[2], m[3], m[4], m[5]);
-			ether_addr_copy(dev->dev_addr, m);
-			netif_dormant_off(dev);
-		} else if (!ether_addr_equal(dev->dev_addr, m)) {
-			netdev_warn(dev, "reject mac %02x-%02x-%02x-%02x-%02x-%02x\n",
-				    m[0], m[1], m[2], m[3], m[4], m[5]);
-		}
-	}
-
-	dev_put(nd->dev);
-}
-
-module_init(most_net_init);
-module_exit(most_net_exit);
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Andrey Shvetsov <andrey.shvetsov@xxxxxx>");
-MODULE_DESCRIPTION("Networking Application Interface Module for MostCore");
diff --git a/drivers/staging/most/aim-sound/Kconfig b/drivers/staging/most/aim-sound/Kconfig
deleted file mode 100644
index 3194c21..0000000
--- a/drivers/staging/most/aim-sound/Kconfig
+++ /dev/null
@@ -1,13 +0,0 @@
-#
-# MOST ALSA configuration
-#
-
-config AIM_SOUND
-	tristate "ALSA AIM"
-	depends on SND
-	select SND_PCM
-	---help---
-	  Say Y here if you want to commumicate via ALSA/sound devices.
-
-	  To compile this driver as a module, choose M here: the
-	  module will be called aim_sound.
diff --git a/drivers/staging/most/aim-sound/Makefile b/drivers/staging/most/aim-sound/Makefile
deleted file mode 100644
index beba958..0000000
--- a/drivers/staging/most/aim-sound/Makefile
+++ /dev/null
@@ -1,4 +0,0 @@
-obj-$(CONFIG_AIM_SOUND) += aim_sound.o
-
-aim_sound-objs := sound.o
-ccflags-y += -Idrivers/staging/most/mostcore/
diff --git a/drivers/staging/most/aim-sound/sound.c b/drivers/staging/most/aim-sound/sound.c
deleted file mode 100644
index ea1366a..0000000
--- a/drivers/staging/most/aim-sound/sound.c
+++ /dev/null
@@ -1,765 +0,0 @@
-/*
- * sound.c - Audio Application Interface Module for Mostcore
- *
- * Copyright (C) 2015 Microchip Technology Germany II GmbH & Co. KG
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * This file is licensed under GPLv2.
- */
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/module.h>
-#include <linux/printk.h>
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <sound/core.h>
-#include <sound/pcm.h>
-#include <sound/pcm_params.h>
-#include <linux/sched.h>
-#include <linux/kthread.h>
-#include <mostcore.h>
-
-#define DRIVER_NAME "sound"
-
-static struct list_head dev_list;
-static struct most_aim audio_aim;
-
-/**
- * struct channel - private structure to keep channel specific data
- * @substream: stores the substream structure
- * @iface: interface for which the channel belongs to
- * @cfg: channel configuration
- * @card: registered sound card
- * @list: list for private use
- * @id: channel index
- * @period_pos: current period position (ring buffer)
- * @buffer_pos: current buffer position (ring buffer)
- * @is_stream_running: identifies whether a stream is running or not
- * @opened: set when the stream is opened
- * @playback_task: playback thread
- * @playback_waitq: waitq used by playback thread
- */
-struct channel {
-	struct snd_pcm_substream *substream;
-	struct snd_pcm_hardware pcm_hardware;
-	struct most_interface *iface;
-	struct most_channel_config *cfg;
-	struct snd_card *card;
-	struct list_head list;
-	int id;
-	unsigned int period_pos;
-	unsigned int buffer_pos;
-	bool is_stream_running;
-
-	struct task_struct *playback_task;
-	wait_queue_head_t playback_waitq;
-
-	void (*copy_fn)(void *alsa, void *most, unsigned int bytes);
-};
-
-#define MOST_PCM_INFO (SNDRV_PCM_INFO_MMAP | \
-		       SNDRV_PCM_INFO_MMAP_VALID | \
-		       SNDRV_PCM_INFO_BATCH | \
-		       SNDRV_PCM_INFO_INTERLEAVED | \
-		       SNDRV_PCM_INFO_BLOCK_TRANSFER)
-
-#define swap16(val) ( \
-	(((u16)(val) << 8) & (u16)0xFF00) | \
-	(((u16)(val) >> 8) & (u16)0x00FF))
-
-#define swap32(val) ( \
-	(((u32)(val) << 24) & (u32)0xFF000000) | \
-	(((u32)(val) <<  8) & (u32)0x00FF0000) | \
-	(((u32)(val) >>  8) & (u32)0x0000FF00) | \
-	(((u32)(val) >> 24) & (u32)0x000000FF))
-
-static void swap_copy16(u16 *dest, const u16 *source, unsigned int bytes)
-{
-	unsigned int i = 0;
-
-	while (i < (bytes / 2)) {
-		dest[i] = swap16(source[i]);
-		i++;
-	}
-}
-
-static void swap_copy24(u8 *dest, const u8 *source, unsigned int bytes)
-{
-	unsigned int i = 0;
-
-	while (i < bytes - 2) {
-		dest[i] = source[i + 2];
-		dest[i + 1] = source[i + 1];
-		dest[i + 2] = source[i];
-		i += 3;
-	}
-}
-
-static void swap_copy32(u32 *dest, const u32 *source, unsigned int bytes)
-{
-	unsigned int i = 0;
-
-	while (i < bytes / 4) {
-		dest[i] = swap32(source[i]);
-		i++;
-	}
-}
-
-static void alsa_to_most_memcpy(void *alsa, void *most, unsigned int bytes)
-{
-	memcpy(most, alsa, bytes);
-}
-
-static void alsa_to_most_copy16(void *alsa, void *most, unsigned int bytes)
-{
-	swap_copy16(most, alsa, bytes);
-}
-
-static void alsa_to_most_copy24(void *alsa, void *most, unsigned int bytes)
-{
-	swap_copy24(most, alsa, bytes);
-}
-
-static void alsa_to_most_copy32(void *alsa, void *most, unsigned int bytes)
-{
-	swap_copy32(most, alsa, bytes);
-}
-
-static void most_to_alsa_memcpy(void *alsa, void *most, unsigned int bytes)
-{
-	memcpy(alsa, most, bytes);
-}
-
-static void most_to_alsa_copy16(void *alsa, void *most, unsigned int bytes)
-{
-	swap_copy16(alsa, most, bytes);
-}
-
-static void most_to_alsa_copy24(void *alsa, void *most, unsigned int bytes)
-{
-	swap_copy24(alsa, most, bytes);
-}
-
-static void most_to_alsa_copy32(void *alsa, void *most, unsigned int bytes)
-{
-	swap_copy32(alsa, most, bytes);
-}
-
-/**
- * get_channel - get pointer to channel
- * @iface: interface structure
- * @channel_id: channel ID
- *
- * This traverses the channel list and returns the channel matching the
- * ID and interface.
- *
- * Returns pointer to channel on success or NULL otherwise.
- */
-static struct channel *get_channel(struct most_interface *iface,
-				   int channel_id)
-{
-	struct channel *channel, *tmp;
-
-	list_for_each_entry_safe(channel, tmp, &dev_list, list) {
-		if ((channel->iface == iface) && (channel->id == channel_id))
-			return channel;
-	}
-
-	return NULL;
-}
-
-/**
- * copy_data - implements data copying function
- * @channel: channel
- * @mbo: MBO from core
- *
- * Copy data from/to ring buffer to/from MBO and update the buffer position
- */
-static bool copy_data(struct channel *channel, struct mbo *mbo)
-{
-	struct snd_pcm_runtime *const runtime = channel->substream->runtime;
-	unsigned int const frame_bytes = channel->cfg->subbuffer_size;
-	unsigned int const buffer_size = runtime->buffer_size;
-	unsigned int frames;
-	unsigned int fr0;
-
-	if (channel->cfg->direction & MOST_CH_RX)
-		frames = mbo->processed_length / frame_bytes;
-	else
-		frames = mbo->buffer_length / frame_bytes;
-	fr0 = min(buffer_size - channel->buffer_pos, frames);
-
-	channel->copy_fn(runtime->dma_area + channel->buffer_pos * frame_bytes,
-			 mbo->virt_address,
-			 fr0 * frame_bytes);
-
-	if (frames > fr0) {
-		/* wrap around at end of ring buffer */
-		channel->copy_fn(runtime->dma_area,
-				 mbo->virt_address + fr0 * frame_bytes,
-				 (frames - fr0) * frame_bytes);
-	}
-
-	channel->buffer_pos += frames;
-	if (channel->buffer_pos >= buffer_size)
-		channel->buffer_pos -= buffer_size;
-	channel->period_pos += frames;
-	if (channel->period_pos >= runtime->period_size) {
-		channel->period_pos -= runtime->period_size;
-		return true;
-	}
-
-	return false;
-}
-
-/**
- * playback_thread - function implements the playback thread
- * @data: private data
- *
- * Thread which does the playback functionality in a loop. It waits for a free
- * MBO from mostcore for a particular channel and copy the data from ring buffer
- * to MBO. Submit the MBO back to mostcore, after copying the data.
- *
- * Returns 0 on success or error code otherwise.
- */
-static int playback_thread(void *data)
-{
-	struct channel *const channel = data;
-
-	while (!kthread_should_stop()) {
-		struct mbo *mbo = NULL;
-		bool period_elapsed = false;
-
-		wait_event_interruptible(
-			channel->playback_waitq,
-			kthread_should_stop() ||
-			(channel->is_stream_running &&
-			 (mbo = most_get_mbo(channel->iface, channel->id,
-					     &audio_aim))));
-		if (!mbo)
-			continue;
-
-		if (channel->is_stream_running)
-			period_elapsed = copy_data(channel, mbo);
-		else
-			memset(mbo->virt_address, 0, mbo->buffer_length);
-
-		most_submit_mbo(mbo);
-		if (period_elapsed)
-			snd_pcm_period_elapsed(channel->substream);
-	}
-
-	return 0;
-}
-
-/**
- * pcm_open - implements open callback function for PCM middle layer
- * @substream: pointer to ALSA PCM substream
- *
- * This is called when a PCM substream is opened. At least, the function should
- * initialize the runtime->hw record.
- *
- * Returns 0 on success or error code otherwise.
- */
-static int pcm_open(struct snd_pcm_substream *substream)
-{
-	struct channel *channel = substream->private_data;
-	struct snd_pcm_runtime *runtime = substream->runtime;
-	struct most_channel_config *cfg = channel->cfg;
-
-	channel->substream = substream;
-
-	if (cfg->direction == MOST_CH_TX) {
-		channel->playback_task = kthread_run(playback_thread, channel,
-						     "most_audio_playback");
-		if (IS_ERR(channel->playback_task)) {
-			pr_err("Couldn't start thread\n");
-			return PTR_ERR(channel->playback_task);
-		}
-	}
-
-	if (most_start_channel(channel->iface, channel->id, &audio_aim)) {
-		pr_err("most_start_channel() failed!\n");
-		if (cfg->direction == MOST_CH_TX)
-			kthread_stop(channel->playback_task);
-		return -EBUSY;
-	}
-
-	runtime->hw = channel->pcm_hardware;
-	return 0;
-}
-
-/**
- * pcm_close - implements close callback function for PCM middle layer
- * @substream: sub-stream pointer
- *
- * Obviously, this is called when a PCM substream is closed. Any private
- * instance for a PCM substream allocated in the open callback will be
- * released here.
- *
- * Returns 0 on success or error code otherwise.
- */
-static int pcm_close(struct snd_pcm_substream *substream)
-{
-	struct channel *channel = substream->private_data;
-
-	if (channel->cfg->direction == MOST_CH_TX)
-		kthread_stop(channel->playback_task);
-	most_stop_channel(channel->iface, channel->id, &audio_aim);
-
-	return 0;
-}
-
-/**
- * pcm_hw_params - implements hw_params callback function for PCM middle layer
- * @substream: sub-stream pointer
- * @hw_params: contains the hardware parameters set by the application
- *
- * This is called when the hardware parameters is set by the application, that
- * is, once when the buffer size, the period size, the format, etc. are defined
- * for the PCM substream. Many hardware setups should be done is this callback,
- * including the allocation of buffers.
- *
- * Returns 0 on success or error code otherwise.
- */
-static int pcm_hw_params(struct snd_pcm_substream *substream,
-			 struct snd_pcm_hw_params *hw_params)
-{
-	struct channel *channel = substream->private_data;
-
-	if ((params_channels(hw_params) > channel->pcm_hardware.channels_max) ||
-	    (params_channels(hw_params) < channel->pcm_hardware.channels_min)) {
-		pr_err("Requested number of channels not supported.\n");
-		return -EINVAL;
-	}
-	return snd_pcm_lib_alloc_vmalloc_buffer(substream,
-						params_buffer_bytes(hw_params));
-}
-
-/**
- * pcm_hw_free - implements hw_free callback function for PCM middle layer
- * @substream: substream pointer
- *
- * This is called to release the resources allocated via hw_params.
- * This function will be always called before the close callback is called.
- *
- * Returns 0 on success or error code otherwise.
- */
-static int pcm_hw_free(struct snd_pcm_substream *substream)
-{
-	return snd_pcm_lib_free_vmalloc_buffer(substream);
-}
-
-/**
- * pcm_prepare - implements prepare callback function for PCM middle layer
- * @substream: substream pointer
- *
- * This callback is called when the PCM is "prepared". Format rate, sample rate,
- * etc., can be set here. This callback can be called many times at each setup.
- *
- * Returns 0 on success or error code otherwise.
- */
-static int pcm_prepare(struct snd_pcm_substream *substream)
-{
-	struct channel *channel = substream->private_data;
-	struct snd_pcm_runtime *runtime = substream->runtime;
-	struct most_channel_config *cfg = channel->cfg;
-	int width = snd_pcm_format_physical_width(runtime->format);
-
-	channel->copy_fn = NULL;
-
-	if (cfg->direction == MOST_CH_TX) {
-		if (snd_pcm_format_big_endian(runtime->format) || width == 8)
-			channel->copy_fn = alsa_to_most_memcpy;
-		else if (width == 16)
-			channel->copy_fn = alsa_to_most_copy16;
-		else if (width == 24)
-			channel->copy_fn = alsa_to_most_copy24;
-		else if (width == 32)
-			channel->copy_fn = alsa_to_most_copy32;
-	} else {
-		if (snd_pcm_format_big_endian(runtime->format) || width == 8)
-			channel->copy_fn = most_to_alsa_memcpy;
-		else if (width == 16)
-			channel->copy_fn = most_to_alsa_copy16;
-		else if (width == 24)
-			channel->copy_fn = most_to_alsa_copy24;
-		else if (width == 32)
-			channel->copy_fn = most_to_alsa_copy32;
-	}
-
-	if (!channel->copy_fn) {
-		pr_err("unsupported format\n");
-		return -EINVAL;
-	}
-
-	channel->period_pos = 0;
-	channel->buffer_pos = 0;
-
-	return 0;
-}
-
-/**
- * pcm_trigger - implements trigger callback function for PCM middle layer
- * @substream: substream pointer
- * @cmd: action to perform
- *
- * This is called when the PCM is started, stopped or paused. The action will be
- * specified in the second argument, SNDRV_PCM_TRIGGER_XXX
- *
- * Returns 0 on success or error code otherwise.
- */
-static int pcm_trigger(struct snd_pcm_substream *substream, int cmd)
-{
-	struct channel *channel = substream->private_data;
-
-	switch (cmd) {
-	case SNDRV_PCM_TRIGGER_START:
-		channel->is_stream_running = true;
-		wake_up_interruptible(&channel->playback_waitq);
-		return 0;
-
-	case SNDRV_PCM_TRIGGER_STOP:
-		channel->is_stream_running = false;
-		return 0;
-
-	default:
-		pr_info("%s(), invalid\n", __func__);
-		return -EINVAL;
-	}
-	return 0;
-}
-
-/**
- * pcm_pointer - implements pointer callback function for PCM middle layer
- * @substream: substream pointer
- *
- * This callback is called when the PCM middle layer inquires the current
- * hardware position on the buffer. The position must be returned in frames,
- * ranging from 0 to buffer_size-1.
- */
-static snd_pcm_uframes_t pcm_pointer(struct snd_pcm_substream *substream)
-{
-	struct channel *channel = substream->private_data;
-
-	return channel->buffer_pos;
-}
-
-/**
- * Initialization of struct snd_pcm_ops
- */
-static const struct snd_pcm_ops pcm_ops = {
-	.open       = pcm_open,
-	.close      = pcm_close,
-	.ioctl      = snd_pcm_lib_ioctl,
-	.hw_params  = pcm_hw_params,
-	.hw_free    = pcm_hw_free,
-	.prepare    = pcm_prepare,
-	.trigger    = pcm_trigger,
-	.pointer    = pcm_pointer,
-	.page       = snd_pcm_lib_get_vmalloc_page,
-	.mmap       = snd_pcm_lib_mmap_vmalloc,
-};
-
-static int split_arg_list(char *buf, char **card_name, char **pcm_format)
-{
-	*card_name = strsep(&buf, ".");
-	if (!*card_name)
-		return -EIO;
-	*pcm_format = strsep(&buf, ".\n");
-	if (!*pcm_format)
-		return -EIO;
-	return 0;
-}
-
-static int audio_set_hw_params(struct snd_pcm_hardware *pcm_hw,
-			       char *pcm_format,
-			       struct most_channel_config *cfg)
-{
-	pcm_hw->info = MOST_PCM_INFO;
-	pcm_hw->rates = SNDRV_PCM_RATE_48000;
-	pcm_hw->rate_min = 48000;
-	pcm_hw->rate_max = 48000;
-	pcm_hw->buffer_bytes_max = cfg->num_buffers * cfg->buffer_size;
-	pcm_hw->period_bytes_min = cfg->buffer_size;
-	pcm_hw->period_bytes_max = cfg->buffer_size;
-	pcm_hw->periods_min = 1;
-	pcm_hw->periods_max = cfg->num_buffers;
-
-	if (!strcmp(pcm_format, "1x8")) {
-		if (cfg->subbuffer_size != 1)
-			goto error;
-		pr_info("PCM format is 8-bit mono\n");
-		pcm_hw->channels_min = 1;
-		pcm_hw->channels_max = 1;
-		pcm_hw->formats = SNDRV_PCM_FMTBIT_S8;
-	} else if (!strcmp(pcm_format, "2x16")) {
-		if (cfg->subbuffer_size != 4)
-			goto error;
-		pr_info("PCM format is 16-bit stereo\n");
-		pcm_hw->channels_min = 2;
-		pcm_hw->channels_max = 2;
-		pcm_hw->formats = SNDRV_PCM_FMTBIT_S16_LE |
-				  SNDRV_PCM_FMTBIT_S16_BE;
-	} else if (!strcmp(pcm_format, "2x24")) {
-		if (cfg->subbuffer_size != 6)
-			goto error;
-		pr_info("PCM format is 24-bit stereo\n");
-		pcm_hw->channels_min = 2;
-		pcm_hw->channels_max = 2;
-		pcm_hw->formats = SNDRV_PCM_FMTBIT_S24_3LE |
-				  SNDRV_PCM_FMTBIT_S24_3BE;
-	} else if (!strcmp(pcm_format, "2x32")) {
-		if (cfg->subbuffer_size != 8)
-			goto error;
-		pr_info("PCM format is 32-bit stereo\n");
-		pcm_hw->channels_min = 2;
-		pcm_hw->channels_max = 2;
-		pcm_hw->formats = SNDRV_PCM_FMTBIT_S32_LE |
-				  SNDRV_PCM_FMTBIT_S32_BE;
-	} else if (!strcmp(pcm_format, "6x16")) {
-		if (cfg->subbuffer_size != 12)
-			goto error;
-		pr_info("PCM format is 16-bit 5.1 multi channel\n");
-		pcm_hw->channels_min = 6;
-		pcm_hw->channels_max = 6;
-		pcm_hw->formats = SNDRV_PCM_FMTBIT_S16_LE |
-				  SNDRV_PCM_FMTBIT_S16_BE;
-	} else {
-		pr_err("PCM format %s not supported\n", pcm_format);
-		return -EIO;
-	}
-	return 0;
-error:
-	pr_err("Audio resolution doesn't fit subbuffer size\n");
-	return -EINVAL;
-}
-
-/**
- * audio_probe_channel - probe function of the driver module
- * @iface: pointer to interface instance
- * @channel_id: channel index/ID
- * @cfg: pointer to actual channel configuration
- * @parent: pointer to kobject (needed for sysfs hook-up)
- * @arg_list: string that provides the name of the device to be created in /dev
- *	      plus the desired audio resolution
- *
- * Creates sound card, pcm device, sets pcm ops and registers sound card.
- *
- * Returns 0 on success or error code otherwise.
- */
-static int audio_probe_channel(struct most_interface *iface, int channel_id,
-			       struct most_channel_config *cfg,
-			       struct kobject *parent, char *arg_list)
-{
-	struct channel *channel;
-	struct snd_card *card;
-	struct snd_pcm *pcm;
-	int playback_count = 0;
-	int capture_count = 0;
-	int ret;
-	int direction;
-	char *card_name;
-	char *pcm_format;
-
-	if (!iface)
-		return -EINVAL;
-
-	if (cfg->data_type != MOST_CH_SYNC) {
-		pr_err("Incompatible channel type\n");
-		return -EINVAL;
-	}
-
-	if (get_channel(iface, channel_id)) {
-		pr_err("channel (%s:%d) is already linked\n",
-		       iface->description, channel_id);
-		return -EINVAL;
-	}
-
-	if (cfg->direction == MOST_CH_TX) {
-		playback_count = 1;
-		direction = SNDRV_PCM_STREAM_PLAYBACK;
-	} else {
-		capture_count = 1;
-		direction = SNDRV_PCM_STREAM_CAPTURE;
-	}
-
-	ret = split_arg_list(arg_list, &card_name, &pcm_format);
-	if (ret < 0) {
-		pr_info("PCM format missing\n");
-		return ret;
-	}
-
-	ret = snd_card_new(NULL, -1, card_name, THIS_MODULE,
-			   sizeof(*channel), &card);
-	if (ret < 0)
-		return ret;
-
-	channel = card->private_data;
-	channel->card = card;
-	channel->cfg = cfg;
-	channel->iface = iface;
-	channel->id = channel_id;
-	init_waitqueue_head(&channel->playback_waitq);
-
-	ret = audio_set_hw_params(&channel->pcm_hardware, pcm_format, cfg);
-	if (ret)
-		goto err_free_card;
-
-	snprintf(card->driver, sizeof(card->driver), "%s", DRIVER_NAME);
-	snprintf(card->shortname, sizeof(card->shortname), "Microchip MOST:%d",
-		 card->number);
-	snprintf(card->longname, sizeof(card->longname), "%s at %s, ch %d",
-		 card->shortname, iface->description, channel_id);
-
-	ret = snd_pcm_new(card, card_name, 0, playback_count,
-			  capture_count, &pcm);
-	if (ret < 0)
-		goto err_free_card;
-
-	pcm->private_data = channel;
-
-	snd_pcm_set_ops(pcm, direction, &pcm_ops);
-
-	ret = snd_card_register(card);
-	if (ret < 0)
-		goto err_free_card;
-
-	list_add_tail(&channel->list, &dev_list);
-
-	return 0;
-
-err_free_card:
-	snd_card_free(card);
-	return ret;
-}
-
-/**
- * audio_disconnect_channel - function to disconnect a channel
- * @iface: pointer to interface instance
- * @channel_id: channel index
- *
- * This frees allocated memory and removes the sound card from ALSA
- *
- * Returns 0 on success or error code otherwise.
- */
-static int audio_disconnect_channel(struct most_interface *iface,
-				    int channel_id)
-{
-	struct channel *channel;
-
-	channel = get_channel(iface, channel_id);
-	if (!channel) {
-		pr_err("sound_disconnect_channel(), invalid channel %d\n",
-		       channel_id);
-		return -EINVAL;
-	}
-
-	list_del(&channel->list);
-	snd_card_free(channel->card);
-
-	return 0;
-}
-
-/**
- * audio_rx_completion - completion handler for rx channels
- * @mbo: pointer to buffer object that has completed
- *
- * This searches for the channel this MBO belongs to and copy the data from MBO
- * to ring buffer
- *
- * Returns 0 on success or error code otherwise.
- */
-static int audio_rx_completion(struct mbo *mbo)
-{
-	struct channel *channel = get_channel(mbo->ifp, mbo->hdm_channel_id);
-	bool period_elapsed = false;
-
-	if (!channel) {
-		pr_err("sound_rx_completion(), invalid channel %d\n",
-		       mbo->hdm_channel_id);
-		return -EINVAL;
-	}
-
-	if (channel->is_stream_running)
-		period_elapsed = copy_data(channel, mbo);
-
-	most_put_mbo(mbo);
-
-	if (period_elapsed)
-		snd_pcm_period_elapsed(channel->substream);
-
-	return 0;
-}
-
-/**
- * audio_tx_completion - completion handler for tx channels
- * @iface: pointer to interface instance
- * @channel_id: channel index/ID
- *
- * This searches the channel that belongs to this combination of interface
- * pointer and channel ID and wakes a process sitting in the wait queue of
- * this channel.
- *
- * Returns 0 on success or error code otherwise.
- */
-static int audio_tx_completion(struct most_interface *iface, int channel_id)
-{
-	struct channel *channel = get_channel(iface, channel_id);
-
-	if (!channel) {
-		pr_err("sound_tx_completion(), invalid channel %d\n",
-		       channel_id);
-		return -EINVAL;
-	}
-
-	wake_up_interruptible(&channel->playback_waitq);
-
-	return 0;
-}
-
-/**
- * Initialization of the struct most_aim
- */
-static struct most_aim audio_aim = {
-	.name = DRIVER_NAME,
-	.probe_channel = audio_probe_channel,
-	.disconnect_channel = audio_disconnect_channel,
-	.rx_completion = audio_rx_completion,
-	.tx_completion = audio_tx_completion,
-};
-
-static int __init audio_init(void)
-{
-	pr_info("init()\n");
-
-	INIT_LIST_HEAD(&dev_list);
-
-	return most_register_aim(&audio_aim);
-}
-
-static void __exit audio_exit(void)
-{
-	struct channel *channel, *tmp;
-
-	pr_info("exit()\n");
-
-	list_for_each_entry_safe(channel, tmp, &dev_list, list) {
-		list_del(&channel->list);
-		snd_card_free(channel->card);
-	}
-
-	most_deregister_aim(&audio_aim);
-}
-
-module_init(audio_init);
-module_exit(audio_exit);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Christian Gromm <christian.gromm@xxxxxxxxxxxxx>");
-MODULE_DESCRIPTION("Audio Application Interface Module for MostCore");
diff --git a/drivers/staging/most/aim-v4l2/Kconfig b/drivers/staging/most/aim-v4l2/Kconfig
deleted file mode 100644
index d70eaaf..0000000
--- a/drivers/staging/most/aim-v4l2/Kconfig
+++ /dev/null
@@ -1,12 +0,0 @@
-#
-# MOST V4L2 configuration
-#
-
-config AIM_V4L2
-	tristate "V4L2 AIM"
-	depends on VIDEO_V4L2
-	---help---
-	  Say Y here if you want to commumicate via Video 4 Linux.
-
-	  To compile this driver as a module, choose M here: the
-	  module will be called aim_v4l2.
\ No newline at end of file
diff --git a/drivers/staging/most/aim-v4l2/Makefile b/drivers/staging/most/aim-v4l2/Makefile
deleted file mode 100644
index 69a7524..0000000
--- a/drivers/staging/most/aim-v4l2/Makefile
+++ /dev/null
@@ -1,5 +0,0 @@
-obj-$(CONFIG_AIM_V4L2) += aim_v4l2.o
-
-aim_v4l2-objs := video.o
-
-ccflags-y += -Idrivers/staging/most/mostcore/
diff --git a/drivers/staging/most/aim-v4l2/video.c b/drivers/staging/most/aim-v4l2/video.c
deleted file mode 100644
index e074841..0000000
--- a/drivers/staging/most/aim-v4l2/video.c
+++ /dev/null
@@ -1,616 +0,0 @@
-/*
- * V4L2 AIM - V4L2 Application Interface Module for MostCore
- *
- * Copyright (C) 2015, Microchip Technology Germany II GmbH & Co. KG
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * This file is licensed under GPLv2.
- */
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/module.h>
-#include <linux/slab.h>
-#include <linux/init.h>
-#include <linux/device.h>
-#include <linux/suspend.h>
-#include <linux/videodev2.h>
-#include <linux/mutex.h>
-#include <media/v4l2-common.h>
-#include <media/v4l2-ioctl.h>
-#include <media/v4l2-event.h>
-#include <media/v4l2-device.h>
-#include <media/v4l2-ctrls.h>
-#include <media/v4l2-fh.h>
-
-#include "mostcore.h"
-
-#define V4L2_AIM_MAX_INPUT  1
-
-static struct most_aim aim_info;
-
-struct most_video_dev {
-	struct most_interface *iface;
-	int ch_idx;
-	struct list_head list;
-	bool mute;
-
-	struct list_head pending_mbos;
-	spinlock_t list_lock;
-
-	struct v4l2_device v4l2_dev;
-	atomic_t access_ref;
-	struct video_device *vdev;
-	unsigned int ctrl_input;
-
-	struct mutex lock;
-
-	wait_queue_head_t wait_data;
-};
-
-struct aim_fh {
-	/* must be the first field of this struct! */
-	struct v4l2_fh fh;
-	struct most_video_dev *mdev;
-	u32 offs;
-};
-
-static struct list_head video_devices = LIST_HEAD_INIT(video_devices);
-static struct spinlock list_lock;
-
-static inline bool data_ready(struct most_video_dev *mdev)
-{
-	return !list_empty(&mdev->pending_mbos);
-}
-
-static inline struct mbo *get_top_mbo(struct most_video_dev *mdev)
-{
-	return list_first_entry(&mdev->pending_mbos, struct mbo, list);
-}
-
-static int aim_vdev_open(struct file *filp)
-{
-	int ret;
-	struct video_device *vdev = video_devdata(filp);
-	struct most_video_dev *mdev = video_drvdata(filp);
-	struct aim_fh *fh;
-
-	v4l2_info(&mdev->v4l2_dev, "aim_vdev_open()\n");
-
-	switch (vdev->vfl_type) {
-	case VFL_TYPE_GRABBER:
-		break;
-	default:
-		return -EINVAL;
-	}
-
-	fh = kzalloc(sizeof(*fh), GFP_KERNEL);
-	if (!fh)
-		return -ENOMEM;
-
-	if (!atomic_inc_and_test(&mdev->access_ref)) {
-		v4l2_err(&mdev->v4l2_dev, "too many clients\n");
-		ret = -EBUSY;
-		goto err_dec;
-	}
-
-	fh->mdev = mdev;
-	v4l2_fh_init(&fh->fh, vdev);
-	filp->private_data = fh;
-
-	v4l2_fh_add(&fh->fh);
-
-	ret = most_start_channel(mdev->iface, mdev->ch_idx, &aim_info);
-	if (ret) {
-		v4l2_err(&mdev->v4l2_dev, "most_start_channel() failed\n");
-		goto err_rm;
-	}
-
-	return 0;
-
-err_rm:
-	v4l2_fh_del(&fh->fh);
-	v4l2_fh_exit(&fh->fh);
-
-err_dec:
-	atomic_dec(&mdev->access_ref);
-	kfree(fh);
-	return ret;
-}
-
-static int aim_vdev_close(struct file *filp)
-{
-	struct aim_fh *fh = filp->private_data;
-	struct most_video_dev *mdev = fh->mdev;
-	struct mbo *mbo, *tmp;
-
-	v4l2_info(&mdev->v4l2_dev, "aim_vdev_close()\n");
-
-	/*
-	 * We need to put MBOs back before we call most_stop_channel()
-	 * to deallocate MBOs.
-	 * From the other hand mostcore still calling rx_completion()
-	 * to deliver MBOs until most_stop_channel() is called.
-	 * Use mute to work around this issue.
-	 * This must be implemented in core.
-	 */
-
-	spin_lock_irq(&mdev->list_lock);
-	mdev->mute = true;
-	list_for_each_entry_safe(mbo, tmp, &mdev->pending_mbos, list) {
-		list_del(&mbo->list);
-		spin_unlock_irq(&mdev->list_lock);
-		most_put_mbo(mbo);
-		spin_lock_irq(&mdev->list_lock);
-	}
-	spin_unlock_irq(&mdev->list_lock);
-	most_stop_channel(mdev->iface, mdev->ch_idx, &aim_info);
-	mdev->mute = false;
-
-	v4l2_fh_del(&fh->fh);
-	v4l2_fh_exit(&fh->fh);
-
-	atomic_dec(&mdev->access_ref);
-	kfree(fh);
-	return 0;
-}
-
-static ssize_t aim_vdev_read(struct file *filp, char __user *buf,
-			     size_t count, loff_t *pos)
-{
-	struct aim_fh *fh = filp->private_data;
-	struct most_video_dev *mdev = fh->mdev;
-	int ret = 0;
-
-	if (*pos)
-		return -ESPIPE;
-
-	if (!mdev)
-		return -ENODEV;
-
-	/* wait for the first buffer */
-	if (!(filp->f_flags & O_NONBLOCK)) {
-		if (wait_event_interruptible(mdev->wait_data, data_ready(mdev)))
-			return -ERESTARTSYS;
-	}
-
-	if (!data_ready(mdev))
-		return -EAGAIN;
-
-	while (count > 0 && data_ready(mdev)) {
-		struct mbo *const mbo = get_top_mbo(mdev);
-		int const rem = mbo->processed_length - fh->offs;
-		int const cnt = rem < count ? rem : count;
-
-		if (copy_to_user(buf, mbo->virt_address + fh->offs, cnt)) {
-			v4l2_err(&mdev->v4l2_dev, "read: copy_to_user failed\n");
-			if (!ret)
-				ret = -EFAULT;
-			return ret;
-		}
-
-		fh->offs += cnt;
-		count -= cnt;
-		buf += cnt;
-		ret += cnt;
-
-		if (cnt >= rem) {
-			fh->offs = 0;
-			spin_lock_irq(&mdev->list_lock);
-			list_del(&mbo->list);
-			spin_unlock_irq(&mdev->list_lock);
-			most_put_mbo(mbo);
-		}
-	}
-	return ret;
-}
-
-static unsigned int aim_vdev_poll(struct file *filp, poll_table *wait)
-{
-	struct aim_fh *fh = filp->private_data;
-	struct most_video_dev *mdev = fh->mdev;
-	unsigned int mask = 0;
-
-	/* only wait if no data is available */
-	if (!data_ready(mdev))
-		poll_wait(filp, &mdev->wait_data, wait);
-	if (data_ready(mdev))
-		mask |= POLLIN | POLLRDNORM;
-
-	return mask;
-}
-
-static void aim_set_format_struct(struct v4l2_format *f)
-{
-	f->fmt.pix.width = 8;
-	f->fmt.pix.height = 8;
-	f->fmt.pix.pixelformat = V4L2_PIX_FMT_MPEG;
-	f->fmt.pix.bytesperline = 0;
-	f->fmt.pix.sizeimage = 188 * 2;
-	f->fmt.pix.colorspace = V4L2_COLORSPACE_REC709;
-	f->fmt.pix.field = V4L2_FIELD_NONE;
-	f->fmt.pix.priv = 0;
-}
-
-static int aim_set_format(struct most_video_dev *mdev, unsigned int cmd,
-			  struct v4l2_format *format)
-{
-	if (format->fmt.pix.pixelformat != V4L2_PIX_FMT_MPEG)
-		return -EINVAL;
-
-	if (cmd == VIDIOC_TRY_FMT)
-		return 0;
-
-	aim_set_format_struct(format);
-
-	return 0;
-}
-
-static int vidioc_querycap(struct file *file, void *priv,
-			   struct v4l2_capability *cap)
-{
-	struct aim_fh *fh = priv;
-	struct most_video_dev *mdev = fh->mdev;
-
-	v4l2_info(&mdev->v4l2_dev, "vidioc_querycap()\n");
-
-	strlcpy(cap->driver, "v4l2_most_aim", sizeof(cap->driver));
-	strlcpy(cap->card, "MOST", sizeof(cap->card));
-	snprintf(cap->bus_info, sizeof(cap->bus_info),
-		 "%s", mdev->iface->description);
-
-	cap->capabilities =
-		V4L2_CAP_READWRITE |
-		V4L2_CAP_TUNER |
-		V4L2_CAP_VIDEO_CAPTURE;
-	return 0;
-}
-
-static int vidioc_enum_fmt_vid_cap(struct file *file, void *priv,
-				   struct v4l2_fmtdesc *f)
-{
-	struct aim_fh *fh = priv;
-	struct most_video_dev *mdev = fh->mdev;
-
-	v4l2_info(&mdev->v4l2_dev, "vidioc_enum_fmt_vid_cap() %d\n", f->index);
-
-	if (f->index)
-		return -EINVAL;
-
-	strcpy(f->description, "MPEG");
-	f->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
-	f->flags = V4L2_FMT_FLAG_COMPRESSED;
-	f->pixelformat = V4L2_PIX_FMT_MPEG;
-
-	return 0;
-}
-
-static int vidioc_g_fmt_vid_cap(struct file *file, void *priv,
-				struct v4l2_format *f)
-{
-	struct aim_fh *fh = priv;
-	struct most_video_dev *mdev = fh->mdev;
-
-	v4l2_info(&mdev->v4l2_dev, "vidioc_g_fmt_vid_cap()\n");
-
-	aim_set_format_struct(f);
-	return 0;
-}
-
-static int vidioc_try_fmt_vid_cap(struct file *file, void *priv,
-				  struct v4l2_format *f)
-{
-	struct aim_fh *fh = priv;
-	struct most_video_dev *mdev = fh->mdev;
-
-	return aim_set_format(mdev, VIDIOC_TRY_FMT, f);
-}
-
-static int vidioc_s_fmt_vid_cap(struct file *file, void *priv,
-				struct v4l2_format *f)
-{
-	struct aim_fh *fh = priv;
-	struct most_video_dev *mdev = fh->mdev;
-
-	return aim_set_format(mdev, VIDIOC_S_FMT, f);
-}
-
-static int vidioc_g_std(struct file *file, void *priv, v4l2_std_id *norm)
-{
-	struct aim_fh *fh = priv;
-	struct most_video_dev *mdev = fh->mdev;
-
-	v4l2_info(&mdev->v4l2_dev, "vidioc_g_std()\n");
-
-	*norm = V4L2_STD_UNKNOWN;
-	return 0;
-}
-
-static int vidioc_enum_input(struct file *file, void *priv,
-			     struct v4l2_input *input)
-{
-	struct aim_fh *fh = priv;
-	struct most_video_dev *mdev = fh->mdev;
-
-	if (input->index >= V4L2_AIM_MAX_INPUT)
-		return -EINVAL;
-
-	strcpy(input->name, "MOST Video");
-	input->type |= V4L2_INPUT_TYPE_CAMERA;
-	input->audioset = 0;
-
-	input->std = mdev->vdev->tvnorms;
-
-	return 0;
-}
-
-static int vidioc_g_input(struct file *file, void *priv, unsigned int *i)
-{
-	struct aim_fh *fh = priv;
-	struct most_video_dev *mdev = fh->mdev;
-	*i = mdev->ctrl_input;
-	return 0;
-}
-
-static int vidioc_s_input(struct file *file, void *priv, unsigned int index)
-{
-	struct aim_fh *fh = priv;
-	struct most_video_dev *mdev = fh->mdev;
-
-	v4l2_info(&mdev->v4l2_dev, "vidioc_s_input(%d)\n", index);
-
-	if (index >= V4L2_AIM_MAX_INPUT)
-		return -EINVAL;
-	mdev->ctrl_input = index;
-	return 0;
-}
-
-static const struct v4l2_file_operations aim_fops = {
-	.owner      = THIS_MODULE,
-	.open       = aim_vdev_open,
-	.release    = aim_vdev_close,
-	.read       = aim_vdev_read,
-	.poll       = aim_vdev_poll,
-	.unlocked_ioctl = video_ioctl2,
-};
-
-static const struct v4l2_ioctl_ops video_ioctl_ops = {
-	.vidioc_querycap            = vidioc_querycap,
-	.vidioc_enum_fmt_vid_cap    = vidioc_enum_fmt_vid_cap,
-	.vidioc_g_fmt_vid_cap       = vidioc_g_fmt_vid_cap,
-	.vidioc_try_fmt_vid_cap     = vidioc_try_fmt_vid_cap,
-	.vidioc_s_fmt_vid_cap       = vidioc_s_fmt_vid_cap,
-	.vidioc_g_std               = vidioc_g_std,
-	.vidioc_enum_input          = vidioc_enum_input,
-	.vidioc_g_input             = vidioc_g_input,
-	.vidioc_s_input             = vidioc_s_input,
-};
-
-static const struct video_device aim_videodev_template = {
-	.fops = &aim_fops,
-	.release = video_device_release,
-	.ioctl_ops = &video_ioctl_ops,
-	.tvnorms = V4L2_STD_UNKNOWN,
-};
-
-/**************************************************************************/
-
-static struct most_video_dev *get_aim_dev(
-	struct most_interface *iface, int channel_idx)
-{
-	struct most_video_dev *mdev;
-	unsigned long flags;
-
-	spin_lock_irqsave(&list_lock, flags);
-	list_for_each_entry(mdev, &video_devices, list) {
-		if (mdev->iface == iface && mdev->ch_idx == channel_idx) {
-			spin_unlock_irqrestore(&list_lock, flags);
-			return mdev;
-		}
-	}
-	spin_unlock_irqrestore(&list_lock, flags);
-	return NULL;
-}
-
-static int aim_rx_data(struct mbo *mbo)
-{
-	unsigned long flags;
-	struct most_video_dev *mdev =
-		get_aim_dev(mbo->ifp, mbo->hdm_channel_id);
-
-	if (!mdev)
-		return -EIO;
-
-	spin_lock_irqsave(&mdev->list_lock, flags);
-	if (unlikely(mdev->mute)) {
-		spin_unlock_irqrestore(&mdev->list_lock, flags);
-		return -EIO;
-	}
-
-	list_add_tail(&mbo->list, &mdev->pending_mbos);
-	spin_unlock_irqrestore(&mdev->list_lock, flags);
-	wake_up_interruptible(&mdev->wait_data);
-	return 0;
-}
-
-static int aim_register_videodev(struct most_video_dev *mdev)
-{
-	int ret;
-
-	v4l2_info(&mdev->v4l2_dev, "aim_register_videodev()\n");
-
-	init_waitqueue_head(&mdev->wait_data);
-
-	/* allocate and fill v4l2 video struct */
-	mdev->vdev = video_device_alloc();
-	if (!mdev->vdev)
-		return -ENOMEM;
-
-	/* Fill the video capture device struct */
-	*mdev->vdev = aim_videodev_template;
-	mdev->vdev->v4l2_dev = &mdev->v4l2_dev;
-	mdev->vdev->lock = &mdev->lock;
-	snprintf(mdev->vdev->name, sizeof(mdev->vdev->name), "MOST: %s",
-		 mdev->v4l2_dev.name);
-
-	/* Register the v4l2 device */
-	video_set_drvdata(mdev->vdev, mdev);
-	ret = video_register_device(mdev->vdev, VFL_TYPE_GRABBER, -1);
-	if (ret) {
-		v4l2_err(&mdev->v4l2_dev, "video_register_device failed (%d)\n",
-			 ret);
-		video_device_release(mdev->vdev);
-	}
-
-	return ret;
-}
-
-static void aim_unregister_videodev(struct most_video_dev *mdev)
-{
-	v4l2_info(&mdev->v4l2_dev, "aim_unregister_videodev()\n");
-
-	video_unregister_device(mdev->vdev);
-}
-
-static void aim_v4l2_dev_release(struct v4l2_device *v4l2_dev)
-{
-	struct most_video_dev *mdev =
-		container_of(v4l2_dev, struct most_video_dev, v4l2_dev);
-
-	v4l2_device_unregister(v4l2_dev);
-	kfree(mdev);
-}
-
-static int aim_probe_channel(struct most_interface *iface, int channel_idx,
-			     struct most_channel_config *ccfg,
-			     struct kobject *parent, char *name)
-{
-	int ret;
-	struct most_video_dev *mdev = get_aim_dev(iface, channel_idx);
-
-	pr_info("aim_probe_channel(%s)\n", name);
-
-	if (mdev) {
-		pr_err("channel already linked\n");
-		return -EEXIST;
-	}
-
-	if (ccfg->direction != MOST_CH_RX) {
-		pr_err("wrong direction, expect rx\n");
-		return -EINVAL;
-	}
-
-	if (ccfg->data_type != MOST_CH_SYNC &&
-	    ccfg->data_type != MOST_CH_ISOC) {
-		pr_err("wrong channel type, expect sync or isoc\n");
-		return -EINVAL;
-	}
-
-	mdev = kzalloc(sizeof(*mdev), GFP_KERNEL);
-	if (!mdev)
-		return -ENOMEM;
-
-	mutex_init(&mdev->lock);
-	atomic_set(&mdev->access_ref, -1);
-	spin_lock_init(&mdev->list_lock);
-	INIT_LIST_HEAD(&mdev->pending_mbos);
-	mdev->iface = iface;
-	mdev->ch_idx = channel_idx;
-	mdev->v4l2_dev.release = aim_v4l2_dev_release;
-
-	/* Create the v4l2_device */
-	strlcpy(mdev->v4l2_dev.name, name, sizeof(mdev->v4l2_dev.name));
-	ret = v4l2_device_register(NULL, &mdev->v4l2_dev);
-	if (ret) {
-		pr_err("v4l2_device_register() failed\n");
-		kfree(mdev);
-		return ret;
-	}
-
-	ret = aim_register_videodev(mdev);
-	if (ret)
-		goto err_unreg;
-
-	spin_lock_irq(&list_lock);
-	list_add(&mdev->list, &video_devices);
-	spin_unlock_irq(&list_lock);
-	v4l2_info(&mdev->v4l2_dev, "aim_probe_channel() done\n");
-	return 0;
-
-err_unreg:
-	v4l2_device_disconnect(&mdev->v4l2_dev);
-	v4l2_device_put(&mdev->v4l2_dev);
-	return ret;
-}
-
-static int aim_disconnect_channel(struct most_interface *iface,
-				  int channel_idx)
-{
-	struct most_video_dev *mdev = get_aim_dev(iface, channel_idx);
-
-	if (!mdev) {
-		pr_err("no such channel is linked\n");
-		return -ENOENT;
-	}
-
-	v4l2_info(&mdev->v4l2_dev, "aim_disconnect_channel()\n");
-
-	spin_lock_irq(&list_lock);
-	list_del(&mdev->list);
-	spin_unlock_irq(&list_lock);
-
-	aim_unregister_videodev(mdev);
-	v4l2_device_disconnect(&mdev->v4l2_dev);
-	v4l2_device_put(&mdev->v4l2_dev);
-	return 0;
-}
-
-static struct most_aim aim_info = {
-	.name = "v4l",
-	.probe_channel = aim_probe_channel,
-	.disconnect_channel = aim_disconnect_channel,
-	.rx_completion = aim_rx_data,
-};
-
-static int __init aim_init(void)
-{
-	spin_lock_init(&list_lock);
-	return most_register_aim(&aim_info);
-}
-
-static void __exit aim_exit(void)
-{
-	struct most_video_dev *mdev, *tmp;
-
-	/*
-	 * As the mostcore currently doesn't call disconnect_channel()
-	 * for linked channels while we call most_deregister_aim()
-	 * we simulate this call here.
-	 * This must be fixed in core.
-	 */
-	spin_lock_irq(&list_lock);
-	list_for_each_entry_safe(mdev, tmp, &video_devices, list) {
-		list_del(&mdev->list);
-		spin_unlock_irq(&list_lock);
-
-		aim_unregister_videodev(mdev);
-		v4l2_device_disconnect(&mdev->v4l2_dev);
-		v4l2_device_put(&mdev->v4l2_dev);
-		spin_lock_irq(&list_lock);
-	}
-	spin_unlock_irq(&list_lock);
-
-	most_deregister_aim(&aim_info);
-	BUG_ON(!list_empty(&video_devices));
-}
-
-module_init(aim_init);
-module_exit(aim_exit);
-
-MODULE_DESCRIPTION("V4L2 Application Interface Module for MostCore");
-MODULE_AUTHOR("Andrey Shvetsov <andrey.shvetsov@xxxxxx>");
-MODULE_LICENSE("GPL");
diff --git a/drivers/staging/most/cdev/Kconfig b/drivers/staging/most/cdev/Kconfig
new file mode 100644
index 0000000..2b04e26
--- /dev/null
+++ b/drivers/staging/most/cdev/Kconfig
@@ -0,0 +1,12 @@
+#
+# MOST Cdev configuration
+#
+
+config MOST_CDEV
+	tristate "Cdev"
+
+	---help---
+	  Say Y here if you want to communicate via character devices.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called most_cdev.
diff --git a/drivers/staging/most/cdev/Makefile b/drivers/staging/most/cdev/Makefile
new file mode 100644
index 0000000..afb9870
--- /dev/null
+++ b/drivers/staging/most/cdev/Makefile
@@ -0,0 +1,4 @@
+obj-$(CONFIG_MOST_CDEV) += most_cdev.o
+
+most_cdev-objs := cdev.o
+ccflags-y += -Idrivers/staging/
diff --git a/drivers/staging/most/cdev/cdev.c b/drivers/staging/most/cdev/cdev.c
new file mode 100644
index 0000000..3dce5e0
--- /dev/null
+++ b/drivers/staging/most/cdev/cdev.c
@@ -0,0 +1,564 @@
+/*
+ * cdev.c - Application interfacing module for character devices
+ *
+ * Copyright (C) 2013-2015 Microchip Technology Germany II GmbH & Co. KG
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * This file is licensed under GPLv2.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/fs.h>
+#include <linux/slab.h>
+#include <linux/device.h>
+#include <linux/cdev.h>
+#include <linux/poll.h>
+#include <linux/kfifo.h>
+#include <linux/uaccess.h>
+#include <linux/idr.h>
+#include <most/core.h>
+
+static dev_t aim_devno;
+static struct class *aim_class;
+static struct ida minor_id;
+static unsigned int major;
+static struct most_aim cdev_aim;
+
+struct aim_channel {
+	wait_queue_head_t wq;
+	spinlock_t unlink;	/* synchronization lock to unlink channels */
+	struct cdev cdev;
+	struct device *dev;
+	struct mutex io_mutex;
+	struct most_interface *iface;
+	struct most_channel_config *cfg;
+	unsigned int channel_id;
+	dev_t devno;
+	size_t mbo_offs;
+	DECLARE_KFIFO_PTR(fifo, typeof(struct mbo *));
+	int access_ref;
+	struct list_head list;
+};
+
+#define to_channel(d) container_of(d, struct aim_channel, cdev)
+static struct list_head channel_list;
+static spinlock_t ch_list_lock;
+
+static inline bool ch_has_mbo(struct aim_channel *c)
+{
+	return channel_has_mbo(c->iface, c->channel_id, &cdev_aim) > 0;
+}
+
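+/*
+ * Get a tx MBO to fill: reuse the MBO already cached in the channel's
+ * fifo or fetch a fresh one from the core and cache it there.
+ */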
+static inline bool ch_get_mbo(struct aim_channel *c, struct mbo **mbo)
+{
+	if (!kfifo_peek(&c->fifo, mbo)) {
+		*mbo = most_get_mbo(c->iface, c->channel_id, &cdev_aim);
+		if (*mbo)
+			kfifo_in(&c->fifo, mbo, 1);
+	}
+	return *mbo;
+}
+
+static struct aim_channel *get_channel(struct most_interface *iface, int id)
+{
+	struct aim_channel *c, *tmp;
+	unsigned long flags;
+	int found_channel = 0;
+
+	spin_lock_irqsave(&ch_list_lock, flags);
+	list_for_each_entry_safe(c, tmp, &channel_list, list) {
+		if ((c->iface == iface) && (c->channel_id == id)) {
+			found_channel = 1;
+			break;
+		}
+	}
+	spin_unlock_irqrestore(&ch_list_lock, flags);
+	if (!found_channel)
+		return NULL;
+	return c;
+}
+
+static void stop_channel(struct aim_channel *c)
+{
+	struct mbo *mbo;
+
+	while (kfifo_out((struct kfifo *)&c->fifo, &mbo, 1))
+		most_put_mbo(mbo);
+	most_stop_channel(c->iface, c->channel_id, &cdev_aim);
+}
+
+static void destroy_cdev(struct aim_channel *c)
+{
+	unsigned long flags;
+
+	device_destroy(aim_class, c->devno);
+	cdev_del(&c->cdev);
+	spin_lock_irqsave(&ch_list_lock, flags);
+	list_del(&c->list);
+	spin_unlock_irqrestore(&ch_list_lock, flags);
+}
+
+static void destroy_channel(struct aim_channel *c)
+{
+	ida_simple_remove(&minor_id, MINOR(c->devno));
+	kfifo_free(&c->fifo);
+	kfree(c);
+}
+
+/**
+ * aim_open - implements the syscall to open the device
+ * @inode: inode pointer
+ * @filp: file pointer
+ *
+ * This stores the channel pointer in the private data field of
+ * the file structure and activates the channel within the core.
+ */
+static int aim_open(struct inode *inode, struct file *filp)
+{
+	struct aim_channel *c;
+	int ret;
+
+	c = to_channel(inode->i_cdev);
+	filp->private_data = c;
+
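+	/* rx channels are opened read-only, tx channels write-only */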
+	if (((c->cfg->direction == MOST_CH_RX) &&
+	     ((filp->f_flags & O_ACCMODE) != O_RDONLY)) ||
+	    ((c->cfg->direction == MOST_CH_TX) &&
+	     ((filp->f_flags & O_ACCMODE) != O_WRONLY))) {
+		pr_info("WARN: Access flags mismatch\n");
+		return -EACCES;
+	}
+
+	mutex_lock(&c->io_mutex);
+	if (!c->dev) {
+		pr_info("WARN: Device is destroyed\n");
+		mutex_unlock(&c->io_mutex);
+		return -ENODEV;
+	}
+
+	if (c->access_ref) {
+		pr_info("WARN: Device is busy\n");
+		mutex_unlock(&c->io_mutex);
+		return -EBUSY;
+	}
+
+	c->mbo_offs = 0;
+	ret = most_start_channel(c->iface, c->channel_id, &cdev_aim);
+	if (!ret)
+		c->access_ref = 1;
+	mutex_unlock(&c->io_mutex);
+	return ret;
+}
+
+/**
+ * aim_close - implements the syscall to close the device
+ * @inode: inode pointer
+ * @filp: file pointer
+ *
+ * This stops the channel within the core.
+ */
+static int aim_close(struct inode *inode, struct file *filp)
+{
+	struct aim_channel *c = to_channel(inode->i_cdev);
+
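+	/*
+	 * If the interface is already gone (c->dev was cleared by
+	 * aim_disconnect_channel), closing the file is what finally
+	 * releases the channel object.
+	 */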
+	mutex_lock(&c->io_mutex);
+	spin_lock(&c->unlink);
+	c->access_ref = 0;
+	spin_unlock(&c->unlink);
+	if (c->dev) {
+		stop_channel(c);
+		mutex_unlock(&c->io_mutex);
+	} else {
+		mutex_unlock(&c->io_mutex);
+		destroy_channel(c);
+	}
+	return 0;
+}
+
+/**
+ * aim_write - implements the syscall to write to the device
+ * @filp: file pointer
+ * @buf: pointer to user buffer
+ * @count: number of bytes to write
+ * @offset: offset from where to start writing
+ */
+static ssize_t aim_write(struct file *filp, const char __user *buf,
+			 size_t count, loff_t *offset)
+{
+	int ret;
+	size_t to_copy, left;
+	struct mbo *mbo = NULL;
+	struct aim_channel *c = filp->private_data;
+
+	mutex_lock(&c->io_mutex);
+	while (c->dev && !ch_get_mbo(c, &mbo)) {
+		mutex_unlock(&c->io_mutex);
+
+		if ((filp->f_flags & O_NONBLOCK))
+			return -EAGAIN;
+		if (wait_event_interruptible(c->wq, ch_has_mbo(c) || !c->dev))
+			return -ERESTARTSYS;
+		mutex_lock(&c->io_mutex);
+	}
+
+	if (unlikely(!c->dev)) {
+		ret = -ENODEV;
+		goto unlock;
+	}
+
+	to_copy = min(count, c->cfg->buffer_size - c->mbo_offs);
+	left = copy_from_user(mbo->virt_address + c->mbo_offs, buf, to_copy);
+	if (left == to_copy) {
+		ret = -EFAULT;
+		goto unlock;
+	}
+
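+	/*
+	 * Data is accumulated in the current MBO across write() calls.
+	 * The MBO is submitted once the buffer is full or, for control
+	 * and async channels, after every write.
+	 */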
+	c->mbo_offs += to_copy - left;
+	if (c->mbo_offs >= c->cfg->buffer_size ||
+	    c->cfg->data_type == MOST_CH_CONTROL ||
+	    c->cfg->data_type == MOST_CH_ASYNC) {
+		kfifo_skip(&c->fifo);
+		mbo->buffer_length = c->mbo_offs;
+		c->mbo_offs = 0;
+		most_submit_mbo(mbo);
+	}
+
+	ret = to_copy - left;
+unlock:
+	mutex_unlock(&c->io_mutex);
+	return ret;
+}
+
+/**
+ * aim_read - implements the syscall to read from the device
+ * @filp: file pointer
+ * @buf: pointer to user buffer
+ * @count: number of bytes to read
+ * @offset: offset from where to start reading
+ */
+static ssize_t
+aim_read(struct file *filp, char __user *buf, size_t count, loff_t *offset)
+{
+	size_t to_copy, not_copied, copied;
+	struct mbo *mbo;
+	struct aim_channel *c = filp->private_data;
+
+	mutex_lock(&c->io_mutex);
+	while (c->dev && !kfifo_peek(&c->fifo, &mbo)) {
+		mutex_unlock(&c->io_mutex);
+		if (filp->f_flags & O_NONBLOCK)
+			return -EAGAIN;
+		if (wait_event_interruptible(c->wq,
+					     (!kfifo_is_empty(&c->fifo) ||
+					      (!c->dev))))
+			return -ERESTARTSYS;
+		mutex_lock(&c->io_mutex);
+	}
+
+	/* make sure we don't submit to gone devices */
+	if (unlikely(!c->dev)) {
+		mutex_unlock(&c->io_mutex);
+		return -ENODEV;
+	}
+
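+	/*
+	 * A partially read MBO stays at the head of the fifo with mbo_offs
+	 * marking the read position; it is returned to the core only after
+	 * it has been consumed completely.
+	 */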
+	to_copy = min_t(size_t,
+			count,
+			mbo->processed_length - c->mbo_offs);
+
+	not_copied = copy_to_user(buf,
+				  mbo->virt_address + c->mbo_offs,
+				  to_copy);
+
+	copied = to_copy - not_copied;
+
+	c->mbo_offs += copied;
+	if (c->mbo_offs >= mbo->processed_length) {
+		kfifo_skip(&c->fifo);
+		most_put_mbo(mbo);
+		c->mbo_offs = 0;
+	}
+	mutex_unlock(&c->io_mutex);
+	return copied;
+}
+
+static unsigned int aim_poll(struct file *filp, poll_table *wait)
+{
+	struct aim_channel *c = filp->private_data;
+	unsigned int mask = 0;
+
+	poll_wait(filp, &c->wq, wait);
+
+	if (c->cfg->direction == MOST_CH_RX) {
+		if (!kfifo_is_empty(&c->fifo))
+			mask |= POLLIN | POLLRDNORM;
+	} else {
+		if (!kfifo_is_empty(&c->fifo) || ch_has_mbo(c))
+			mask |= POLLOUT | POLLWRNORM;
+	}
+	return mask;
+}
+
+/**
+ * Initialization of struct file_operations
+ */
+static const struct file_operations channel_fops = {
+	.owner = THIS_MODULE,
+	.read = aim_read,
+	.write = aim_write,
+	.open = aim_open,
+	.release = aim_close,
+	.poll = aim_poll,
+};
+
+/**
+ * aim_disconnect_channel - disconnect a channel
+ * @iface: pointer to interface instance
+ * @channel_id: channel index
+ *
+ * This frees allocated memory and removes the cdev that represents this
+ * channel in user space.
+ */
+static int aim_disconnect_channel(struct most_interface *iface, int channel_id)
+{
+	struct aim_channel *c;
+
+	if (!iface) {
+		pr_info("Bad interface pointer\n");
+		return -EINVAL;
+	}
+
+	c = get_channel(iface, channel_id);
+	if (!c)
+		return -ENXIO;
+
+	mutex_lock(&c->io_mutex);
+	spin_lock(&c->unlink);
+	c->dev = NULL;
+	spin_unlock(&c->unlink);
+	destroy_cdev(c);
+	if (c->access_ref) {
+		stop_channel(c);
+		wake_up_interruptible(&c->wq);
+		mutex_unlock(&c->io_mutex);
+	} else {
+		mutex_unlock(&c->io_mutex);
+		destroy_channel(c);
+	}
+	return 0;
+}
+
+/**
+ * aim_rx_completion - completion handler for rx channels
+ * @mbo: pointer to buffer object that has completed
+ *
+ * This searches for the channel linked to this MBO and stores it in the local
+ * fifo buffer.
+ */
+static int aim_rx_completion(struct mbo *mbo)
+{
+	struct aim_channel *c;
+
+	if (!mbo)
+		return -EINVAL;
+
+	c = get_channel(mbo->ifp, mbo->hdm_channel_id);
+	if (!c)
+		return -ENXIO;
+
+	spin_lock(&c->unlink);
+	if (!c->access_ref || !c->dev) {
+		spin_unlock(&c->unlink);
+		return -ENODEV;
+	}
+	kfifo_in(&c->fifo, &mbo, 1);
+	spin_unlock(&c->unlink);
+#ifdef DEBUG_MESG
+	if (kfifo_is_full(&c->fifo))
+		pr_info("WARN: Fifo is full\n");
+#endif
+	wake_up_interruptible(&c->wq);
+	return 0;
+}
+
+/**
+ * aim_tx_completion - completion handler for tx channels
+ * @iface: pointer to interface instance
+ * @channel_id: channel index/ID
+ *
+ * This wakes sleeping processes in the wait-queue.
+ */
+static int aim_tx_completion(struct most_interface *iface, int channel_id)
+{
+	struct aim_channel *c;
+
+	if (!iface) {
+		pr_info("Bad interface pointer\n");
+		return -EINVAL;
+	}
+	if ((channel_id < 0) || (channel_id >= iface->num_channels)) {
+		pr_info("Channel ID out of range\n");
+		return -EINVAL;
+	}
+
+	c = get_channel(iface, channel_id);
+	if (!c)
+		return -ENXIO;
+	wake_up_interruptible(&c->wq);
+	return 0;
+}
+
+/**
+ * aim_probe - probe function of the driver module
+ * @iface: pointer to interface instance
+ * @channel_id: channel index/ID
+ * @cfg: pointer to actual channel configuration
+ * @parent: pointer to kobject (needed for sysfs hook-up)
+ * @name: name of the device to be created
+ *
+ * This allocates a channel object and creates the device node in /dev
+ *
+ * Returns 0 on success or error code otherwise.
+ */
+static int aim_probe(struct most_interface *iface, int channel_id,
+		     struct most_channel_config *cfg,
+		     struct kobject *parent, char *name)
+{
+	struct aim_channel *c;
+	unsigned long cl_flags;
+	int retval;
+	int current_minor;
+
+	if ((!iface) || (!cfg) || (!parent) || (!name)) {
+		pr_info("Probing AIM with bad arguments\n");
+		return -EINVAL;
+	}
+	c = get_channel(iface, channel_id);
+	if (c)
+		return -EEXIST;
+
+	current_minor = ida_simple_get(&minor_id, 0, 0, GFP_KERNEL);
+	if (current_minor < 0)
+		return current_minor;
+
+	c = kzalloc(sizeof(*c), GFP_KERNEL);
+	if (!c) {
+		retval = -ENOMEM;
+		goto error_alloc_channel;
+	}
+
+	c->devno = MKDEV(major, current_minor);
+	cdev_init(&c->cdev, &channel_fops);
+	c->cdev.owner = THIS_MODULE;
+	cdev_add(&c->cdev, c->devno, 1);
+	c->iface = iface;
+	c->cfg = cfg;
+	c->channel_id = channel_id;
+	c->access_ref = 0;
+	spin_lock_init(&c->unlink);
+	INIT_KFIFO(c->fifo);
+	retval = kfifo_alloc(&c->fifo, cfg->num_buffers, GFP_KERNEL);
+	if (retval) {
+		pr_info("failed to alloc channel kfifo\n");
+		goto error_alloc_kfifo;
+	}
+	init_waitqueue_head(&c->wq);
+	mutex_init(&c->io_mutex);
+	spin_lock_irqsave(&ch_list_lock, cl_flags);
+	list_add_tail(&c->list, &channel_list);
+	spin_unlock_irqrestore(&ch_list_lock, cl_flags);
+	c->dev = device_create(aim_class, NULL, c->devno, NULL, "%s", name);
+
+	if (IS_ERR(c->dev)) {
+		retval = PTR_ERR(c->dev);
+		pr_info("failed to create new device node %s\n", name);
+		goto error_create_device;
+	}
+	kobject_uevent(&c->dev->kobj, KOBJ_ADD);
+	return 0;
+
+error_create_device:
+	kfifo_free(&c->fifo);
+	list_del(&c->list);
+error_alloc_kfifo:
+	cdev_del(&c->cdev);
+	kfree(c);
+error_alloc_channel:
+	ida_simple_remove(&minor_id, current_minor);
+	return retval;
+}
+
+static struct most_aim cdev_aim = {
+	.name = "cdev",
+	.probe_channel = aim_probe,
+	.disconnect_channel = aim_disconnect_channel,
+	.rx_completion = aim_rx_completion,
+	.tx_completion = aim_tx_completion,
+};
+
+static int __init mod_init(void)
+{
+	int err;
+
+	pr_info("init()\n");
+
+	INIT_LIST_HEAD(&channel_list);
+	spin_lock_init(&ch_list_lock);
+	ida_init(&minor_id);
+
+	err = alloc_chrdev_region(&aim_devno, 0, 50, "cdev");
+	if (err < 0)
+		goto dest_ida;
+	major = MAJOR(aim_devno);
+
+	aim_class = class_create(THIS_MODULE, "most_cdev_aim");
+	if (IS_ERR(aim_class)) {
+		pr_err("no udev support\n");
+		err = PTR_ERR(aim_class);
+		goto free_cdev;
+	}
+	err = most_register_aim(&cdev_aim);
+	if (err)
+		goto dest_class;
+	return 0;
+
+dest_class:
+	class_destroy(aim_class);
+free_cdev:
+	unregister_chrdev_region(aim_devno, 50);
+dest_ida:
+	ida_destroy(&minor_id);
+	return err;
+}
+
+static void __exit mod_exit(void)
+{
+	struct aim_channel *c, *tmp;
+
+	pr_info("exit module\n");
+
+	most_deregister_aim(&cdev_aim);
+
+	list_for_each_entry_safe(c, tmp, &channel_list, list) {
+		destroy_cdev(c);
+		destroy_channel(c);
+	}
+	class_destroy(aim_class);
+	unregister_chrdev_region(aim_devno, 50);
+	ida_destroy(&minor_id);
+}
+
+module_init(mod_init);
+module_exit(mod_exit);
+MODULE_AUTHOR("Christian Gromm <christian.gromm@xxxxxxxxxxxxx>");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("character device AIM for mostcore");
diff --git a/drivers/staging/most/core.c b/drivers/staging/most/core.c
new file mode 100644
index 0000000..6ee6acf
--- /dev/null
+++ b/drivers/staging/most/core.c
@@ -0,0 +1,1949 @@
+/*
+ * core.c - Implementation of core module of MOST Linux driver stack
+ *
+ * Copyright (C) 2013-2015 Microchip Technology Germany II GmbH & Co. KG
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * This file is licensed under GPLv2.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/device.h>
+#include <linux/list.h>
+#include <linux/poll.h>
+#include <linux/wait.h>
+#include <linux/kobject.h>
+#include <linux/mutex.h>
+#include <linux/completion.h>
+#include <linux/sysfs.h>
+#include <linux/kthread.h>
+#include <linux/dma-mapping.h>
+#include <linux/idr.h>
+#include <most/core.h>
+
+#define MAX_CHANNELS	64
+#define STRING_SIZE	80
+
+static struct class *most_class;
+static struct device *core_dev;
+static struct ida mdev_id;
+static int dummy_num_buffers;
+
+struct most_c_aim_obj {
+	struct most_aim *ptr;
+	int refs;
+	int num_buffers;
+};
+
+struct most_c_obj {
+	struct kobject kobj;
+	struct completion cleanup;
+	atomic_t mbo_ref;
+	atomic_t mbo_nq_level;
+	u16 channel_id;
+	bool is_poisoned;
+	struct mutex start_mutex;
+	struct mutex nq_mutex; /* nq thread synchronization */
+	int is_starving;
+	struct most_interface *iface;
+	struct most_inst_obj *inst;
+	struct most_channel_config cfg;
+	bool keep_mbo;
+	bool enqueue_halt;
+	struct list_head fifo;
+	spinlock_t fifo_lock;
+	struct list_head halt_fifo;
+	struct list_head list;
+	struct most_c_aim_obj aim0;
+	struct most_c_aim_obj aim1;
+	struct list_head trash_fifo;
+	struct task_struct *hdm_enqueue_task;
+	wait_queue_head_t hdm_fifo_wq;
+};
+
+#define to_c_obj(d) container_of(d, struct most_c_obj, kobj)
+
+struct most_inst_obj {
+	int dev_id;
+	struct most_interface *iface;
+	struct list_head channel_list;
+	struct most_c_obj *channel[MAX_CHANNELS];
+	struct kobject kobj;
+	struct list_head list;
+};
+
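+/*
+ * Data type names accepted by and reported through sysfs. The trailing
+ * newlines match the strings written to the attribute files; two entries
+ * map to MOST_CH_ISOC so that both "isoc" and "isoc_avp" are accepted.
+ */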
+static const struct {
+	int most_ch_data_type;
+	const char *name;
+} ch_data_type[] = {
+	{ MOST_CH_CONTROL, "control\n" },
+	{ MOST_CH_ASYNC, "async\n" },
+	{ MOST_CH_SYNC, "sync\n" },
+	{ MOST_CH_ISOC, "isoc\n"},
+	{ MOST_CH_ISOC, "isoc_avp\n"},
+};
+
+#define to_inst_obj(d) container_of(d, struct most_inst_obj, kobj)
+
+/**
+ * list_pop_mbo - retrieves the first MBO of the list and removes it
+ * @ptr: the list head to grab the MBO from.
+ */
+#define list_pop_mbo(ptr)						\
+({									\
+	struct mbo *_mbo = list_first_entry(ptr, struct mbo, list);	\
+	list_del(&_mbo->list);						\
+	_mbo;								\
+})
+
+/*		     ___	     ___
+ *		     ___C H A N N E L___
+ */
+
+/**
+ * struct most_c_attr - to access the attributes of a channel object
+ * @attr: attributes of a channel
+ * @show: pointer to the show function
+ * @store: pointer to the store function
+ */
+struct most_c_attr {
+	struct attribute attr;
+	ssize_t (*show)(struct most_c_obj *d,
+			struct most_c_attr *attr,
+			char *buf);
+	ssize_t (*store)(struct most_c_obj *d,
+			 struct most_c_attr *attr,
+			 const char *buf,
+			 size_t count);
+};
+
+#define to_channel_attr(a) container_of(a, struct most_c_attr, attr)
+
+/**
+ * channel_attr_show - show function of channel object
+ * @kobj: pointer to its kobject
+ * @attr: pointer to its attributes
+ * @buf: buffer
+ */
+static ssize_t channel_attr_show(struct kobject *kobj, struct attribute *attr,
+				 char *buf)
+{
+	struct most_c_attr *channel_attr = to_channel_attr(attr);
+	struct most_c_obj *c_obj = to_c_obj(kobj);
+
+	if (!channel_attr->show)
+		return -EIO;
+
+	return channel_attr->show(c_obj, channel_attr, buf);
+}
+
+/**
+ * channel_attr_store - store function of channel object
+ * @kobj: pointer to its kobject
+ * @attr: pointer to its attributes
+ * @buf: buffer
+ * @len: length of buffer
+ */
+static ssize_t channel_attr_store(struct kobject *kobj,
+				  struct attribute *attr,
+				  const char *buf,
+				  size_t len)
+{
+	struct most_c_attr *channel_attr = to_channel_attr(attr);
+	struct most_c_obj *c_obj = to_c_obj(kobj);
+
+	if (!channel_attr->store)
+		return -EIO;
+	return channel_attr->store(c_obj, channel_attr, buf, len);
+}
+
+static const struct sysfs_ops most_channel_sysfs_ops = {
+	.show = channel_attr_show,
+	.store = channel_attr_store,
+};
+
+/**
+ * most_free_mbo_coherent - free an MBO and its coherent buffer
+ * @mbo: buffer to be released
+ *
+ */
+static void most_free_mbo_coherent(struct mbo *mbo)
+{
+	struct most_c_obj *c = mbo->context;
+	u16 const coherent_buf_size = c->cfg.buffer_size + c->cfg.extra_len;
+
+	dma_free_coherent(NULL, coherent_buf_size, mbo->virt_address,
+			  mbo->bus_address);
+	kfree(mbo);
+	if (atomic_sub_and_test(1, &c->mbo_ref))
+		complete(&c->cleanup);
+}
+
+/**
+ * flush_channel_fifos - clear the channel fifos
+ * @c: pointer to channel object
+ */
+static void flush_channel_fifos(struct most_c_obj *c)
+{
+	unsigned long flags, hf_flags;
+	struct mbo *mbo, *tmp;
+
+	if (list_empty(&c->fifo) && list_empty(&c->halt_fifo))
+		return;
+
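+	/*
+	 * The fifo lock is dropped around most_free_mbo_coherent() so the
+	 * coherent buffer is never freed while holding the spinlock.
+	 */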
+	spin_lock_irqsave(&c->fifo_lock, flags);
+	list_for_each_entry_safe(mbo, tmp, &c->fifo, list) {
+		list_del(&mbo->list);
+		spin_unlock_irqrestore(&c->fifo_lock, flags);
+		most_free_mbo_coherent(mbo);
+		spin_lock_irqsave(&c->fifo_lock, flags);
+	}
+	spin_unlock_irqrestore(&c->fifo_lock, flags);
+
+	spin_lock_irqsave(&c->fifo_lock, hf_flags);
+	list_for_each_entry_safe(mbo, tmp, &c->halt_fifo, list) {
+		list_del(&mbo->list);
+		spin_unlock_irqrestore(&c->fifo_lock, hf_flags);
+		most_free_mbo_coherent(mbo);
+		spin_lock_irqsave(&c->fifo_lock, hf_flags);
+	}
+	spin_unlock_irqrestore(&c->fifo_lock, hf_flags);
+
+	if (unlikely(!list_empty(&c->fifo) || !list_empty(&c->halt_fifo)))
+		pr_info("WARN: fifo | halt fifo not empty\n");
+}
+
+/**
+ * flush_trash_fifo - clear the trash fifo
+ * @c: pointer to channel object
+ */
+static int flush_trash_fifo(struct most_c_obj *c)
+{
+	struct mbo *mbo, *tmp;
+	unsigned long flags;
+
+	spin_lock_irqsave(&c->fifo_lock, flags);
+	list_for_each_entry_safe(mbo, tmp, &c->trash_fifo, list) {
+		list_del(&mbo->list);
+		spin_unlock_irqrestore(&c->fifo_lock, flags);
+		most_free_mbo_coherent(mbo);
+		spin_lock_irqsave(&c->fifo_lock, flags);
+	}
+	spin_unlock_irqrestore(&c->fifo_lock, flags);
+	return 0;
+}
+
+/**
+ * most_channel_release - release function of channel object
+ * @kobj: pointer to channel's kobject
+ */
+static void most_channel_release(struct kobject *kobj)
+{
+	struct most_c_obj *c = to_c_obj(kobj);
+
+	kfree(c);
+}
+
+static ssize_t available_directions_show(struct most_c_obj *c,
+					 struct most_c_attr *attr,
+					 char *buf)
+{
+	unsigned int i = c->channel_id;
+
+	strcpy(buf, "");
+	if (c->iface->channel_vector[i].direction & MOST_CH_RX)
+		strcat(buf, "rx ");
+	if (c->iface->channel_vector[i].direction & MOST_CH_TX)
+		strcat(buf, "tx ");
+	strcat(buf, "\n");
+	return strlen(buf);
+}
+
+static ssize_t available_datatypes_show(struct most_c_obj *c,
+					struct most_c_attr *attr,
+					char *buf)
+{
+	unsigned int i = c->channel_id;
+
+	strcpy(buf, "");
+	if (c->iface->channel_vector[i].data_type & MOST_CH_CONTROL)
+		strcat(buf, "control ");
+	if (c->iface->channel_vector[i].data_type & MOST_CH_ASYNC)
+		strcat(buf, "async ");
+	if (c->iface->channel_vector[i].data_type & MOST_CH_SYNC)
+		strcat(buf, "sync ");
+	if (c->iface->channel_vector[i].data_type & MOST_CH_ISOC)
+		strcat(buf, "isoc ");
+	strcat(buf, "\n");
+	return strlen(buf);
+}
+
+static ssize_t number_of_packet_buffers_show(struct most_c_obj *c,
+					     struct most_c_attr *attr,
+					     char *buf)
+{
+	unsigned int i = c->channel_id;
+
+	return snprintf(buf, PAGE_SIZE, "%d\n",
+			c->iface->channel_vector[i].num_buffers_packet);
+}
+
+static ssize_t number_of_stream_buffers_show(struct most_c_obj *c,
+					     struct most_c_attr *attr,
+					     char *buf)
+{
+	unsigned int i = c->channel_id;
+
+	return snprintf(buf, PAGE_SIZE, "%d\n",
+			c->iface->channel_vector[i].num_buffers_streaming);
+}
+
+static ssize_t size_of_packet_buffer_show(struct most_c_obj *c,
+					  struct most_c_attr *attr,
+					  char *buf)
+{
+	unsigned int i = c->channel_id;
+
+	return snprintf(buf, PAGE_SIZE, "%d\n",
+			c->iface->channel_vector[i].buffer_size_packet);
+}
+
+static ssize_t size_of_stream_buffer_show(struct most_c_obj *c,
+					  struct most_c_attr *attr,
+					  char *buf)
+{
+	unsigned int i = c->channel_id;
+
+	return snprintf(buf, PAGE_SIZE, "%d\n",
+			c->iface->channel_vector[i].buffer_size_streaming);
+}
+
+static ssize_t channel_starving_show(struct most_c_obj *c,
+				     struct most_c_attr *attr,
+				     char *buf)
+{
+	return snprintf(buf, PAGE_SIZE, "%d\n", c->is_starving);
+}
+
+static ssize_t set_number_of_buffers_show(struct most_c_obj *c,
+					  struct most_c_attr *attr,
+					  char *buf)
+{
+	return snprintf(buf, PAGE_SIZE, "%d\n", c->cfg.num_buffers);
+}
+
+static ssize_t set_number_of_buffers_store(struct most_c_obj *c,
+					   struct most_c_attr *attr,
+					   const char *buf,
+					   size_t count)
+{
+	int ret = kstrtou16(buf, 0, &c->cfg.num_buffers);
+
+	if (ret)
+		return ret;
+	return count;
+}
+
+static ssize_t set_buffer_size_show(struct most_c_obj *c,
+				    struct most_c_attr *attr,
+				    char *buf)
+{
+	return snprintf(buf, PAGE_SIZE, "%d\n", c->cfg.buffer_size);
+}
+
+static ssize_t set_buffer_size_store(struct most_c_obj *c,
+				     struct most_c_attr *attr,
+				     const char *buf,
+				     size_t count)
+{
+	int ret = kstrtou16(buf, 0, &c->cfg.buffer_size);
+
+	if (ret)
+		return ret;
+	return count;
+}
+
+static ssize_t set_direction_show(struct most_c_obj *c,
+				  struct most_c_attr *attr,
+				  char *buf)
+{
+	if (c->cfg.direction & MOST_CH_TX)
+		return snprintf(buf, PAGE_SIZE, "tx\n");
+	else if (c->cfg.direction & MOST_CH_RX)
+		return snprintf(buf, PAGE_SIZE, "rx\n");
+	return snprintf(buf, PAGE_SIZE, "unconfigured\n");
+}
+
+static ssize_t set_direction_store(struct most_c_obj *c,
+				   struct most_c_attr *attr,
+				   const char *buf,
+				   size_t count)
+{
+	if (!strcmp(buf, "dir_rx\n")) {
+		c->cfg.direction = MOST_CH_RX;
+	} else if (!strcmp(buf, "rx\n")) {
+		c->cfg.direction = MOST_CH_RX;
+	} else if (!strcmp(buf, "dir_tx\n")) {
+		c->cfg.direction = MOST_CH_TX;
+	} else if (!strcmp(buf, "tx\n")) {
+		c->cfg.direction = MOST_CH_TX;
+	} else {
+		pr_info("WARN: invalid attribute settings\n");
+		return -EINVAL;
+	}
+	return count;
+}
+
+static ssize_t set_datatype_show(struct most_c_obj *c,
+				 struct most_c_attr *attr,
+				 char *buf)
+{
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(ch_data_type); i++) {
+		if (c->cfg.data_type & ch_data_type[i].most_ch_data_type)
+			return snprintf(buf, PAGE_SIZE, ch_data_type[i].name);
+	}
+	return snprintf(buf, PAGE_SIZE, "unconfigured\n");
+}
+
+static ssize_t set_datatype_store(struct most_c_obj *c,
+				  struct most_c_attr *attr,
+				  const char *buf,
+				  size_t count)
+{
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(ch_data_type); i++) {
+		if (!strcmp(buf, ch_data_type[i].name)) {
+			c->cfg.data_type = ch_data_type[i].most_ch_data_type;
+			break;
+		}
+	}
+
+	if (i == ARRAY_SIZE(ch_data_type)) {
+		pr_info("WARN: invalid attribute settings\n");
+		return -EINVAL;
+	}
+	return count;
+}
+
+static ssize_t set_subbuffer_size_show(struct most_c_obj *c,
+				       struct most_c_attr *attr,
+				       char *buf)
+{
+	return snprintf(buf, PAGE_SIZE, "%d\n", c->cfg.subbuffer_size);
+}
+
+static ssize_t set_subbuffer_size_store(struct most_c_obj *c,
+					struct most_c_attr *attr,
+					const char *buf,
+					size_t count)
+{
+	int ret = kstrtou16(buf, 0, &c->cfg.subbuffer_size);
+
+	if (ret)
+		return ret;
+	return count;
+}
+
+static ssize_t set_packets_per_xact_show(struct most_c_obj *c,
+					 struct most_c_attr *attr,
+					 char *buf)
+{
+	return snprintf(buf, PAGE_SIZE, "%d\n", c->cfg.packets_per_xact);
+}
+
+static ssize_t set_packets_per_xact_store(struct most_c_obj *c,
+					  struct most_c_attr *attr,
+					  const char *buf,
+					  size_t count)
+{
+	int ret = kstrtou16(buf, 0, &c->cfg.packets_per_xact);
+
+	if (ret)
+		return ret;
+	return count;
+}
+
+static struct most_c_attr most_c_attrs[] = {
+	__ATTR_RO(available_directions),
+	__ATTR_RO(available_datatypes),
+	__ATTR_RO(number_of_packet_buffers),
+	__ATTR_RO(number_of_stream_buffers),
+	__ATTR_RO(size_of_stream_buffer),
+	__ATTR_RO(size_of_packet_buffer),
+	__ATTR_RO(channel_starving),
+	__ATTR_RW(set_buffer_size),
+	__ATTR_RW(set_number_of_buffers),
+	__ATTR_RW(set_direction),
+	__ATTR_RW(set_datatype),
+	__ATTR_RW(set_subbuffer_size),
+	__ATTR_RW(set_packets_per_xact),
+};
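+
+/*
+ * Note (illustrative only, not mandated by this patch): the attributes above
+ * are exposed through sysfs via most_channel_sysfs_ops, so a channel can be
+ * configured from user space with plain shell writes before it is linked to
+ * an AIM. Hypothetical example (device and channel names depend on the setup):
+ *
+ *	CH=/sys/devices/virtual/most/mostcore/devices/mdev0/ch0
+ *	echo 16 >$CH/set_number_of_buffers
+ *	echo 1024 >$CH/set_buffer_size
+ *	echo rx >$CH/set_direction
+ */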
+
+/**
+ * most_channel_def_attrs - array of default attributes of channel object
+ */
+static struct attribute *most_channel_def_attrs[] = {
+	&most_c_attrs[0].attr,
+	&most_c_attrs[1].attr,
+	&most_c_attrs[2].attr,
+	&most_c_attrs[3].attr,
+	&most_c_attrs[4].attr,
+	&most_c_attrs[5].attr,
+	&most_c_attrs[6].attr,
+	&most_c_attrs[7].attr,
+	&most_c_attrs[8].attr,
+	&most_c_attrs[9].attr,
+	&most_c_attrs[10].attr,
+	&most_c_attrs[11].attr,
+	&most_c_attrs[12].attr,
+	NULL,
+};
+
+static struct kobj_type most_channel_ktype = {
+	.sysfs_ops = &most_channel_sysfs_ops,
+	.release = most_channel_release,
+	.default_attrs = most_channel_def_attrs,
+};
+
+static struct kset *most_channel_kset;
+
+/**
+ * create_most_c_obj - allocates a channel object
+ * @name: name of the channel object
+ * @parent: parent kobject
+ *
+ * This creates a channel object and registers it with sysfs.
+ * Returns a pointer to the object or NULL when something went wrong.
+ */
+static struct most_c_obj *
+create_most_c_obj(const char *name, struct kobject *parent)
+{
+	struct most_c_obj *c;
+	int retval;
+
+	c = kzalloc(sizeof(*c), GFP_KERNEL);
+	if (!c)
+		return NULL;
+	c->kobj.kset = most_channel_kset;
+	retval = kobject_init_and_add(&c->kobj, &most_channel_ktype, parent,
+				      "%s", name);
+	if (retval) {
+		kobject_put(&c->kobj);
+		return NULL;
+	}
+	kobject_uevent(&c->kobj, KOBJ_ADD);
+	return c;
+}
+
+/*		     ___	       ___
+ *		     ___I N S T A N C E___
+ */
+
+static struct list_head instance_list;
+
+/**
+ * struct most_inst_attribute - to access the attributes of instance object
+ * @attr: attributes of an instance
+ * @show: pointer to the show function
+ * @store: pointer to the store function
+ */
+struct most_inst_attribute {
+	struct attribute attr;
+	ssize_t (*show)(struct most_inst_obj *d,
+			struct most_inst_attribute *attr,
+			char *buf);
+	ssize_t (*store)(struct most_inst_obj *d,
+			 struct most_inst_attribute *attr,
+			 const char *buf,
+			 size_t count);
+};
+
+#define to_instance_attr(a) \
+	container_of(a, struct most_inst_attribute, attr)
+
+/**
+ * instance_attr_show - show function for an instance object
+ * @kobj: pointer to kobject
+ * @attr: pointer to attribute struct
+ * @buf: buffer
+ */
+static ssize_t instance_attr_show(struct kobject *kobj,
+				  struct attribute *attr,
+				  char *buf)
+{
+	struct most_inst_attribute *instance_attr;
+	struct most_inst_obj *instance_obj;
+
+	instance_attr = to_instance_attr(attr);
+	instance_obj = to_inst_obj(kobj);
+
+	if (!instance_attr->show)
+		return -EIO;
+
+	return instance_attr->show(instance_obj, instance_attr, buf);
+}
+
+/**
+ * instance_attr_store - store function for an instance object
+ * @kobj: pointer to kobject
+ * @attr: pointer to attribute struct
+ * @buf: buffer
+ * @len: length of buffer
+ */
+static ssize_t instance_attr_store(struct kobject *kobj,
+				   struct attribute *attr,
+				   const char *buf,
+				   size_t len)
+{
+	struct most_inst_attribute *instance_attr;
+	struct most_inst_obj *instance_obj;
+
+	instance_attr = to_instance_attr(attr);
+	instance_obj = to_inst_obj(kobj);
+
+	if (!instance_attr->store)
+		return -EIO;
+
+	return instance_attr->store(instance_obj, instance_attr, buf, len);
+}
+
+static const struct sysfs_ops most_inst_sysfs_ops = {
+	.show = instance_attr_show,
+	.store = instance_attr_store,
+};
+
+/**
+ * most_inst_release - release function for instance object
+ * @kobj: pointer to instance's kobject
+ *
+ * This frees the allocated memory for the instance object
+ */
+static void most_inst_release(struct kobject *kobj)
+{
+	struct most_inst_obj *inst = to_inst_obj(kobj);
+
+	kfree(inst);
+}
+
+static ssize_t description_show(struct most_inst_obj *instance_obj,
+				struct most_inst_attribute *attr,
+				char *buf)
+{
+	return snprintf(buf, PAGE_SIZE, "%s\n",
+			instance_obj->iface->description);
+}
+
+static ssize_t interface_show(struct most_inst_obj *instance_obj,
+			      struct most_inst_attribute *attr,
+			      char *buf)
+{
+	switch (instance_obj->iface->interface) {
+	case ITYPE_LOOPBACK:
+		return snprintf(buf, PAGE_SIZE, "loopback\n");
+	case ITYPE_I2C:
+		return snprintf(buf, PAGE_SIZE, "i2c\n");
+	case ITYPE_I2S:
+		return snprintf(buf, PAGE_SIZE, "i2s\n");
+	case ITYPE_TSI:
+		return snprintf(buf, PAGE_SIZE, "tsi\n");
+	case ITYPE_HBI:
+		return snprintf(buf, PAGE_SIZE, "hbi\n");
+	case ITYPE_MEDIALB_DIM:
+		return snprintf(buf, PAGE_SIZE, "mlb_dim\n");
+	case ITYPE_MEDIALB_DIM2:
+		return snprintf(buf, PAGE_SIZE, "mlb_dim2\n");
+	case ITYPE_USB:
+		return snprintf(buf, PAGE_SIZE, "usb\n");
+	case ITYPE_PCIE:
+		return snprintf(buf, PAGE_SIZE, "pcie\n");
+	}
+	return snprintf(buf, PAGE_SIZE, "unknown\n");
+}
+
+static struct most_inst_attribute most_inst_attr_description =
+	__ATTR_RO(description);
+
+static struct most_inst_attribute most_inst_attr_interface =
+	__ATTR_RO(interface);
+
+static struct attribute *most_inst_def_attrs[] = {
+	&most_inst_attr_description.attr,
+	&most_inst_attr_interface.attr,
+	NULL,
+};
+
+static struct kobj_type most_inst_ktype = {
+	.sysfs_ops = &most_inst_sysfs_ops,
+	.release = most_inst_release,
+	.default_attrs = most_inst_def_attrs,
+};
+
+static struct kset *most_inst_kset;
+
+/**
+ * create_most_inst_obj - creates an instance object
+ * @name: name of the object to be created
+ *
+ * This allocates memory for an instance structure, assigns the proper kset
+ * and registers it with sysfs.
+ *
+ * Returns a pointer to the instance object or NULL when something went wrong.
+ */
+static struct most_inst_obj *create_most_inst_obj(const char *name)
+{
+	struct most_inst_obj *inst;
+	int retval;
+
+	inst = kzalloc(sizeof(*inst), GFP_KERNEL);
+	if (!inst)
+		return NULL;
+	inst->kobj.kset = most_inst_kset;
+	retval = kobject_init_and_add(&inst->kobj, &most_inst_ktype, NULL,
+				      "%s", name);
+	if (retval) {
+		kobject_put(&inst->kobj);
+		return NULL;
+	}
+	kobject_uevent(&inst->kobj, KOBJ_ADD);
+	return inst;
+}
+
+/**
+ * destroy_most_inst_obj - MOST instance release function
+ * @inst: pointer to the instance object
+ *
+ * This decrements the reference counter of the instance object.
+ * If the reference count turns zero, its release function is called
+ */
+static void destroy_most_inst_obj(struct most_inst_obj *inst)
+{
+	struct most_c_obj *c, *tmp;
+
+	list_for_each_entry_safe(c, tmp, &inst->channel_list, list) {
+		flush_trash_fifo(c);
+		flush_channel_fifos(c);
+		kobject_put(&c->kobj);
+	}
+	kobject_put(&inst->kobj);
+}
+
+/*		     ___     ___
+ *		     ___A I M___
+ */
+struct most_aim_obj {
+	struct kobject kobj;
+	struct list_head list;
+	struct most_aim *driver;
+};
+
+#define to_aim_obj(d) container_of(d, struct most_aim_obj, kobj)
+
+static struct list_head aim_list;
+
+/**
+ * struct most_aim_attribute - to access the attributes of AIM object
+ * @attr: attributes of an AIM
+ * @show: pointer to the show function
+ * @store: pointer to the store function
+ */
+struct most_aim_attribute {
+	struct attribute attr;
+	ssize_t (*show)(struct most_aim_obj *d,
+			struct most_aim_attribute *attr,
+			char *buf);
+	ssize_t (*store)(struct most_aim_obj *d,
+			 struct most_aim_attribute *attr,
+			 const char *buf,
+			 size_t count);
+};
+
+#define to_aim_attr(a) container_of(a, struct most_aim_attribute, attr)
+
+/**
+ * aim_attr_show - show function of an AIM object
+ * @kobj: pointer to kobject
+ * @attr: pointer to attribute struct
+ * @buf: buffer
+ */
+static ssize_t aim_attr_show(struct kobject *kobj,
+			     struct attribute *attr,
+			     char *buf)
+{
+	struct most_aim_attribute *aim_attr;
+	struct most_aim_obj *aim_obj;
+
+	aim_attr = to_aim_attr(attr);
+	aim_obj = to_aim_obj(kobj);
+
+	if (!aim_attr->show)
+		return -EIO;
+
+	return aim_attr->show(aim_obj, aim_attr, buf);
+}
+
+/**
+ * aim_attr_store - store function of an AIM object
+ * @kobj: pointer to kobject
+ * @attr: pointer to attribute struct
+ * @buf: buffer
+ * @len: length of buffer
+ */
+static ssize_t aim_attr_store(struct kobject *kobj,
+			      struct attribute *attr,
+			      const char *buf,
+			      size_t len)
+{
+	struct most_aim_attribute *aim_attr;
+	struct most_aim_obj *aim_obj;
+
+	aim_attr = to_aim_attr(attr);
+	aim_obj = to_aim_obj(kobj);
+
+	if (!aim_attr->store)
+		return -EIO;
+	return aim_attr->store(aim_obj, aim_attr, buf, len);
+}
+
+static const struct sysfs_ops most_aim_sysfs_ops = {
+	.show = aim_attr_show,
+	.store = aim_attr_store,
+};
+
+/**
+ * most_aim_release - AIM release function
+ * @kobj: pointer to AIM's kobject
+ */
+static void most_aim_release(struct kobject *kobj)
+{
+	struct most_aim_obj *aim_obj = to_aim_obj(kobj);
+
+	kfree(aim_obj);
+}
+
+static ssize_t links_show(struct most_aim_obj *aim_obj,
+			  struct most_aim_attribute *attr,
+			  char *buf)
+{
+	struct most_c_obj *c;
+	struct most_inst_obj *i;
+	int offs = 0;
+
+	list_for_each_entry(i, &instance_list, list) {
+		list_for_each_entry(c, &i->channel_list, list) {
+			if (c->aim0.ptr == aim_obj->driver ||
+			    c->aim1.ptr == aim_obj->driver) {
+				offs += snprintf(buf + offs, PAGE_SIZE - offs,
+						 "%s:%s\n",
+						 kobject_name(&i->kobj),
+						 kobject_name(&c->kobj));
+			}
+		}
+	}
+
+	return offs;
+}
+
+/**
+ * split_string - parses and modifies the string in the buffer buf and
+ * splits it into two mandatory and one optional substring.
+ *
+ * @buf: complete string from attribute 'add_channel'
+ * @a: address of pointer to 1st substring (=instance name)
+ * @b: address of pointer to 2nd substring (=channel name)
+ * @c: optional address of pointer to 3rd substring (=user defined name)
+ *
+ * Examples:
+ *
+ * Input: "mdev0:ch6:my_channel\n" or
+ *        "mdev0:ch6:my_channel"
+ *
+ * Output: *a -> "mdev0", *b -> "ch6", *c -> "my_channel"
+ *
+ * Input: "mdev1:ep81\n"
+ * Output: *a -> "mdev1", *b -> "ep81", *c -> ""
+ *
+ * Input: "mdev1:ep81"
+ * Output: *a -> "mdev1", *b -> "ep81", *c == NULL
+ */
+static int split_string(char *buf, char **a, char **b, char **c)
+{
+	*a = strsep(&buf, ":");
+	if (!*a)
+		return -EIO;
+
+	*b = strsep(&buf, ":\n");
+	if (!*b)
+		return -EIO;
+
+	if (c)
+		*c = strsep(&buf, ":\n");
+
+	return 0;
+}
+
+/**
+ * get_channel_by_name - get pointer to channel object
+ * @mdev: name of the device instance
+ * @mdev_ch: name of the respective channel
+ *
+ * This retrieves the pointer to a channel object.
+ */
+static struct
+most_c_obj *get_channel_by_name(char *mdev, char *mdev_ch)
+{
+	struct most_c_obj *c, *tmp;
+	struct most_inst_obj *i, *i_tmp;
+	int found = 0;
+
+	list_for_each_entry_safe(i, i_tmp, &instance_list, list) {
+		if (!strcmp(kobject_name(&i->kobj), mdev)) {
+			found++;
+			break;
+		}
+	}
+	if (unlikely(!found))
+		return ERR_PTR(-EIO);
+
+	list_for_each_entry_safe(c, tmp, &i->channel_list, list) {
+		if (!strcmp(kobject_name(&c->kobj), mdev_ch)) {
+			found++;
+			break;
+		}
+	}
+	if (unlikely(found < 2))
+		return ERR_PTR(-EIO);
+	return c;
+}
+
+/**
+ * add_link_store - store() function for add_link attribute
+ * @aim_obj: pointer to AIM object
+ * @attr: its attributes
+ * @buf: buffer
+ * @len: buffer length
+ *
+ * This parses the string given by buf and splits it into
+ * three substrings. Note: the third substring is optional. In case a cdev
+ * AIM is loaded, the optional third substring makes up the name of the
+ * device node in the /dev directory. If omitted, the device node will
+ * inherit the channel's name within sysfs.
+ *
+ * Searches for a pair of device and channel and probes the AIM
+ *
+ * Example:
+ * (1) echo "mdev0:ch6:my_rxchannel" >add_link
+ * (2) echo "mdev1:ep81" >add_link
+ *
+ * (1) would create the device node /dev/my_rxchannel
+ * (2) would create the device node /dev/mdev1-ep81
+ */
+static ssize_t add_link_store(struct most_aim_obj *aim_obj,
+			      struct most_aim_attribute *attr,
+			      const char *buf,
+			      size_t len)
+{
+	struct most_c_obj *c;
+	struct most_aim **aim_ptr;
+	char buffer[STRING_SIZE];
+	char *mdev;
+	char *mdev_ch;
+	char *mdev_devnod;
+	char devnod_buf[STRING_SIZE];
+	int ret;
+	size_t max_len = min_t(size_t, len + 1, STRING_SIZE);
+
+	strlcpy(buffer, buf, max_len);
+
+	ret = split_string(buffer, &mdev, &mdev_ch, &mdev_devnod);
+	if (ret)
+		return ret;
+
+	if (!mdev_devnod || *mdev_devnod == 0) {
+		snprintf(devnod_buf, sizeof(devnod_buf), "%s-%s", mdev,
+			 mdev_ch);
+		mdev_devnod = devnod_buf;
+	}
+
+	c = get_channel_by_name(mdev, mdev_ch);
+	if (IS_ERR(c))
+		return -ENODEV;
+
+	if (!c->aim0.ptr)
+		aim_ptr = &c->aim0.ptr;
+	else if (!c->aim1.ptr)
+		aim_ptr = &c->aim1.ptr;
+	else
+		return -ENOSPC;
+
+	*aim_ptr = aim_obj->driver;
+	ret = aim_obj->driver->probe_channel(c->iface, c->channel_id,
+					     &c->cfg, &c->kobj, mdev_devnod);
+	if (ret) {
+		*aim_ptr = NULL;
+		return ret;
+	}
+
+	return len;
+}
+
+/**
+ * remove_link_store - store function for remove_link attribute
+ * @aim_obj: pointer to AIM object
+ * @attr: its attributes
+ * @buf: buffer
+ * @len: buffer length
+ *
+ * Example:
+ * echo "mdev0:ep81" >remove_link
+ */
+static ssize_t remove_link_store(struct most_aim_obj *aim_obj,
+				 struct most_aim_attribute *attr,
+				 const char *buf,
+				 size_t len)
+{
+	struct most_c_obj *c;
+	char buffer[STRING_SIZE];
+	char *mdev;
+	char *mdev_ch;
+	int ret;
+	size_t max_len = min_t(size_t, len + 1, STRING_SIZE);
+
+	strlcpy(buffer, buf, max_len);
+	ret = split_string(buffer, &mdev, &mdev_ch, NULL);
+	if (ret)
+		return ret;
+
+	c = get_channel_by_name(mdev, mdev_ch);
+	if (IS_ERR(c))
+		return -ENODEV;
+
+	if (aim_obj->driver->disconnect_channel(c->iface, c->channel_id))
+		return -EIO;
+	if (c->aim0.ptr == aim_obj->driver)
+		c->aim0.ptr = NULL;
+	if (c->aim1.ptr == aim_obj->driver)
+		c->aim1.ptr = NULL;
+	return len;
+}
+
+static struct most_aim_attribute most_aim_attrs[] = {
+	__ATTR_RO(links),
+	__ATTR_WO(add_link),
+	__ATTR_WO(remove_link),
+};
+
+static struct attribute *most_aim_def_attrs[] = {
+	&most_aim_attrs[0].attr,
+	&most_aim_attrs[1].attr,
+	&most_aim_attrs[2].attr,
+	NULL,
+};
+
+static struct kobj_type most_aim_ktype = {
+	.sysfs_ops = &most_aim_sysfs_ops,
+	.release = most_aim_release,
+	.default_attrs = most_aim_def_attrs,
+};
+
+static struct kset *most_aim_kset;
+
+/**
+ * create_most_aim_obj - creates an AIM object
+ * @name: name of the AIM
+ *
+ * This creates an AIM object, assigns the proper kset, and registers
+ * it with sysfs.
+ * Returns a pointer to the object or NULL if something went wrong.
+ */
+static struct most_aim_obj *create_most_aim_obj(const char *name)
+{
+	struct most_aim_obj *most_aim;
+	int retval;
+
+	most_aim = kzalloc(sizeof(*most_aim), GFP_KERNEL);
+	if (!most_aim)
+		return NULL;
+	most_aim->kobj.kset = most_aim_kset;
+	retval = kobject_init_and_add(&most_aim->kobj, &most_aim_ktype,
+				      NULL, "%s", name);
+	if (retval) {
+		kobject_put(&most_aim->kobj);
+		return NULL;
+	}
+	kobject_uevent(&most_aim->kobj, KOBJ_ADD);
+	return most_aim;
+}
+
+/**
+ * destroy_most_aim_obj - AIM release function
+ * @p: pointer to AIM object
+ *
+ * This decrements the reference counter of the AIM object. If the
+ * reference count turns zero, its release function will be called.
+ */
+static void destroy_most_aim_obj(struct most_aim_obj *p)
+{
+	kobject_put(&p->kobj);
+}
+
+/*		     ___       ___
+ *		     ___C O R E___
+ */
+
+/**
+ * Instantiation of the MOST bus
+ */
+static struct bus_type most_bus = {
+	.name = "most",
+};
+
+/**
+ * Instantiation of the core driver
+ */
+static struct device_driver mostcore = {
+	.name = "mostcore",
+	.bus = &most_bus,
+};
+
+static inline void trash_mbo(struct mbo *mbo)
+{
+	unsigned long flags;
+	struct most_c_obj *c = mbo->context;
+
+	spin_lock_irqsave(&c->fifo_lock, flags);
+	list_add(&mbo->list, &c->trash_fifo);
+	spin_unlock_irqrestore(&c->fifo_lock, flags);
+}
+
+static bool hdm_mbo_ready(struct most_c_obj *c)
+{
+	bool empty;
+
+	if (c->enqueue_halt)
+		return false;
+
+	spin_lock_irq(&c->fifo_lock);
+	empty = list_empty(&c->halt_fifo);
+	spin_unlock_irq(&c->fifo_lock);
+
+	return !empty;
+}
+
+static void nq_hdm_mbo(struct mbo *mbo)
+{
+	unsigned long flags;
+	struct most_c_obj *c = mbo->context;
+
+	spin_lock_irqsave(&c->fifo_lock, flags);
+	list_add_tail(&mbo->list, &c->halt_fifo);
+	spin_unlock_irqrestore(&c->fifo_lock, flags);
+	wake_up_interruptible(&c->hdm_fifo_wq);
+}
+
+static int hdm_enqueue_thread(void *data)
+{
+	struct most_c_obj *c = data;
+	struct mbo *mbo;
+	int ret;
+	typeof(c->iface->enqueue) enqueue = c->iface->enqueue;
+
+	while (likely(!kthread_should_stop())) {
+		wait_event_interruptible(c->hdm_fifo_wq,
+					 hdm_mbo_ready(c) ||
+					 kthread_should_stop());
+
+		mutex_lock(&c->nq_mutex);
+		spin_lock_irq(&c->fifo_lock);
+		if (unlikely(c->enqueue_halt || list_empty(&c->halt_fifo))) {
+			spin_unlock_irq(&c->fifo_lock);
+			mutex_unlock(&c->nq_mutex);
+			continue;
+		}
+
+		mbo = list_pop_mbo(&c->halt_fifo);
+		spin_unlock_irq(&c->fifo_lock);
+
+		if (c->cfg.direction == MOST_CH_RX)
+			mbo->buffer_length = c->cfg.buffer_size;
+
+		ret = enqueue(mbo->ifp, mbo->hdm_channel_id, mbo);
+		mutex_unlock(&c->nq_mutex);
+
+		if (unlikely(ret)) {
+			pr_err("hdm enqueue failed\n");
+			nq_hdm_mbo(mbo);
+			c->hdm_enqueue_task = NULL;
+			return 0;
+		}
+	}
+
+	return 0;
+}
+
+static int run_enqueue_thread(struct most_c_obj *c, int channel_id)
+{
+	struct task_struct *task =
+		kthread_run(hdm_enqueue_thread, c, "hdm_fifo_%d",
+			    channel_id);
+
+	if (IS_ERR(task))
+		return PTR_ERR(task);
+
+	c->hdm_enqueue_task = task;
+	return 0;
+}
+
+/**
+ * arm_mbo - recycle MBO for further usage
+ * @mbo: buffer object
+ *
+ * This puts an MBO back on the list to have it ready for upcoming
+ * tx transactions.
+ *
+ * In case the MBO belongs to a channel that recently has been
+ * poisoned, the MBO is scheduled to be trashed.
+ * Calls the completion handler of an attached AIM.
+ */
+static void arm_mbo(struct mbo *mbo)
+{
+	unsigned long flags;
+	struct most_c_obj *c;
+
+	BUG_ON((!mbo) || (!mbo->context));
+	c = mbo->context;
+
+	if (c->is_poisoned) {
+		trash_mbo(mbo);
+		return;
+	}
+
+	spin_lock_irqsave(&c->fifo_lock, flags);
+	++*mbo->num_buffers_ptr;
+	list_add_tail(&mbo->list, &c->fifo);
+	spin_unlock_irqrestore(&c->fifo_lock, flags);
+
+	if (c->aim0.refs && c->aim0.ptr->tx_completion)
+		c->aim0.ptr->tx_completion(c->iface, c->channel_id);
+
+	if (c->aim1.refs && c->aim1.ptr->tx_completion)
+		c->aim1.ptr->tx_completion(c->iface, c->channel_id);
+}
+
+/**
+ * arm_mbo_chain - helper function that arms an MBO chain for the HDM
+ * @c: pointer to interface channel
+ * @dir: direction of the channel
+ * @compl: pointer to completion function
+ *
+ * This allocates buffer objects, including their DMA-coherent buffers,
+ * and puts them into the fifo.
+ * Buffers of Rx channels are put in the kthread fifo and hence immediately
+ * submitted to the HDM.
+ *
+ * Returns the number of allocated and enqueued MBOs.
+ */
+static int arm_mbo_chain(struct most_c_obj *c, int dir,
+			 void (*compl)(struct mbo *))
+{
+	unsigned int i;
+	int retval;
+	struct mbo *mbo;
+	u32 coherent_buf_size = c->cfg.buffer_size + c->cfg.extra_len;
+
+	atomic_set(&c->mbo_nq_level, 0);
+
+	for (i = 0; i < c->cfg.num_buffers; i++) {
+		mbo = kzalloc(sizeof(*mbo), GFP_KERNEL);
+		if (!mbo) {
+			retval = i;
+			goto _exit;
+		}
+		mbo->context = c;
+		mbo->ifp = c->iface;
+		mbo->hdm_channel_id = c->channel_id;
+		mbo->virt_address = dma_alloc_coherent(NULL,
+						       coherent_buf_size,
+						       &mbo->bus_address,
+						       GFP_KERNEL);
+		if (!mbo->virt_address) {
+			pr_info("WARN: No DMA coherent buffer.\n");
+			retval = i;
+			goto _error1;
+		}
+		mbo->complete = compl;
+		mbo->num_buffers_ptr = &dummy_num_buffers;
+		if (dir == MOST_CH_RX) {
+			nq_hdm_mbo(mbo);
+			atomic_inc(&c->mbo_nq_level);
+		} else {
+			arm_mbo(mbo);
+		}
+	}
+	return i;
+
+_error1:
+	kfree(mbo);
+_exit:
+	return retval;
+}
+
+/**
+ * most_submit_mbo - submits an MBO to fifo
+ * @mbo: pointer to the MBO
+ */
+void most_submit_mbo(struct mbo *mbo)
+{
+	if (WARN_ONCE(!mbo || !mbo->context,
+		      "bad mbo or missing channel reference\n"))
+		return;
+
+	nq_hdm_mbo(mbo);
+}
+EXPORT_SYMBOL_GPL(most_submit_mbo);
+
+/**
+ * most_write_completion - write completion handler
+ * @mbo: pointer to MBO
+ *
+ * This recycles the MBO for further usage. In case the channel has been
+ * poisoned, the MBO is scheduled to be trashed.
+ */
+static void most_write_completion(struct mbo *mbo)
+{
+	struct most_c_obj *c;
+
+	BUG_ON((!mbo) || (!mbo->context));
+
+	c = mbo->context;
+	if (mbo->status == MBO_E_INVAL)
+		pr_info("WARN: Tx MBO status: invalid\n");
+	if (unlikely(c->is_poisoned || (mbo->status == MBO_E_CLOSE)))
+		trash_mbo(mbo);
+	else
+		arm_mbo(mbo);
+}
+
+/**
+ * get_channel_by_iface - get pointer to channel object
+ * @iface: pointer to interface instance
+ * @id: channel ID
+ *
+ * This retrieves a pointer to a channel of the given interface and channel ID.
+ */
+static struct
+most_c_obj *get_channel_by_iface(struct most_interface *iface, int id)
+{
+	struct most_inst_obj *i;
+
+	if (unlikely(!iface)) {
+		pr_err("Bad interface\n");
+		return NULL;
+	}
+	if (unlikely((id < 0) || (id >= iface->num_channels))) {
+		pr_err("Channel index (%d) out of range\n", id);
+		return NULL;
+	}
+	i = iface->priv;
+	if (unlikely(!i)) {
+		pr_err("interface is not registered\n");
+		return NULL;
+	}
+	return i->channel[id];
+}
+
+int channel_has_mbo(struct most_interface *iface, int id, struct most_aim *aim)
+{
+	struct most_c_obj *c = get_channel_by_iface(iface, id);
+	unsigned long flags;
+	int empty;
+
+	if (unlikely(!c))
+		return -EINVAL;
+
+	if (c->aim0.refs && c->aim1.refs &&
+	    ((aim == c->aim0.ptr && c->aim0.num_buffers <= 0) ||
+	     (aim == c->aim1.ptr && c->aim1.num_buffers <= 0)))
+		return 0;
+
+	spin_lock_irqsave(&c->fifo_lock, flags);
+	empty = list_empty(&c->fifo);
+	spin_unlock_irqrestore(&c->fifo_lock, flags);
+	return !empty;
+}
+EXPORT_SYMBOL_GPL(channel_has_mbo);
+
+/**
+ * most_get_mbo - get pointer to an MBO of pool
+ * @iface: pointer to interface instance
+ * @id: channel ID
+ *
+ * This attempts to get a free buffer out of the channel fifo.
+ * Returns a pointer to MBO on success or NULL otherwise.
+ */
+struct mbo *most_get_mbo(struct most_interface *iface, int id,
+			 struct most_aim *aim)
+{
+	struct mbo *mbo;
+	struct most_c_obj *c;
+	unsigned long flags;
+	int *num_buffers_ptr;
+
+	c = get_channel_by_iface(iface, id);
+	if (unlikely(!c))
+		return NULL;
+
+	if (c->aim0.refs && c->aim1.refs &&
+	    ((aim == c->aim0.ptr && c->aim0.num_buffers <= 0) ||
+	     (aim == c->aim1.ptr && c->aim1.num_buffers <= 0)))
+		return NULL;
+
+	if (aim == c->aim0.ptr)
+		num_buffers_ptr = &c->aim0.num_buffers;
+	else if (aim == c->aim1.ptr)
+		num_buffers_ptr = &c->aim1.num_buffers;
+	else
+		num_buffers_ptr = &dummy_num_buffers;
+
+	spin_lock_irqsave(&c->fifo_lock, flags);
+	if (list_empty(&c->fifo)) {
+		spin_unlock_irqrestore(&c->fifo_lock, flags);
+		return NULL;
+	}
+	mbo = list_pop_mbo(&c->fifo);
+	--*num_buffers_ptr;
+	spin_unlock_irqrestore(&c->fifo_lock, flags);
+
+	mbo->num_buffers_ptr = num_buffers_ptr;
+	mbo->buffer_length = c->cfg.buffer_size;
+	return mbo;
+}
+EXPORT_SYMBOL_GPL(most_get_mbo);
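+
+/*
+ * Illustrative sketch (hypothetical AIM-side code, error handling omitted)
+ * of how the MBO pool is meant to be used for transmission: fetch a free
+ * buffer, fill it, then hand it back to the core for the HDM to send.
+ *
+ *	struct mbo *mbo = most_get_mbo(iface, channel_id, &my_aim);
+ *
+ *	if (mbo) {
+ *		memcpy(mbo->virt_address, data, len);
+ *		mbo->buffer_length = len;
+ *		most_submit_mbo(mbo);
+ *	}
+ */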
+
+/**
+ * most_put_mbo - return buffer to pool
+ * @mbo: buffer object
+ */
+void most_put_mbo(struct mbo *mbo)
+{
+	struct most_c_obj *c = mbo->context;
+
+	if (c->cfg.direction == MOST_CH_TX) {
+		arm_mbo(mbo);
+		return;
+	}
+	nq_hdm_mbo(mbo);
+	atomic_inc(&c->mbo_nq_level);
+}
+EXPORT_SYMBOL_GPL(most_put_mbo);
+
+/**
+ * most_read_completion - read completion handler
+ * @mbo: pointer to MBO
+ *
+ * This function is called by the HDM when data has been received from the
+ * hardware and copied to the buffer of the MBO.
+ *
+ * In case the channel has been poisoned it puts the buffer in the trash queue.
+ * Otherwise, it passes the buffer to an AIM for further processing.
+ */
+static void most_read_completion(struct mbo *mbo)
+{
+	struct most_c_obj *c = mbo->context;
+
+	if (unlikely(c->is_poisoned || (mbo->status == MBO_E_CLOSE))) {
+		trash_mbo(mbo);
+		return;
+	}
+
+	if (mbo->status == MBO_E_INVAL) {
+		nq_hdm_mbo(mbo);
+		atomic_inc(&c->mbo_nq_level);
+		return;
+	}
+
+	if (atomic_sub_and_test(1, &c->mbo_nq_level))
+		c->is_starving = 1;
+
+	if (c->aim0.refs && c->aim0.ptr->rx_completion &&
+	    c->aim0.ptr->rx_completion(mbo) == 0)
+		return;
+
+	if (c->aim1.refs && c->aim1.ptr->rx_completion &&
+	    c->aim1.ptr->rx_completion(mbo) == 0)
+		return;
+
+	most_put_mbo(mbo);
+}
+
+/**
+ * most_start_channel - prepares a channel for communication
+ * @iface: pointer to interface instance
+ * @id: channel ID
+ *
+ * This prepares the channel for usage. It cross-checks whether the
+ * channel has been properly configured.
+ *
+ * Returns 0 on success or error code otherwise.
+ */
+int most_start_channel(struct most_interface *iface, int id,
+		       struct most_aim *aim)
+{
+	int num_buffer;
+	int ret;
+	struct most_c_obj *c = get_channel_by_iface(iface, id);
+
+	if (unlikely(!c))
+		return -EINVAL;
+
+	mutex_lock(&c->start_mutex);
+	if (c->aim0.refs + c->aim1.refs > 0)
+		goto out; /* already started by other aim */
+
+	if (!try_module_get(iface->mod)) {
+		pr_info("failed to acquire HDM lock\n");
+		mutex_unlock(&c->start_mutex);
+		return -ENOLCK;
+	}
+
+	c->cfg.extra_len = 0;
+	if (c->iface->configure(c->iface, c->channel_id, &c->cfg)) {
+		pr_info("channel configuration failed. Go check settings...\n");
+		ret = -EINVAL;
+		goto error;
+	}
+
+	init_waitqueue_head(&c->hdm_fifo_wq);
+
+	if (c->cfg.direction == MOST_CH_RX)
+		num_buffer = arm_mbo_chain(c, c->cfg.direction,
+					   most_read_completion);
+	else
+		num_buffer = arm_mbo_chain(c, c->cfg.direction,
+					   most_write_completion);
+	if (unlikely(!num_buffer)) {
+		pr_info("failed to allocate memory\n");
+		ret = -ENOMEM;
+		goto error;
+	}
+
+	ret = run_enqueue_thread(c, id);
+	if (ret)
+		goto error;
+
+	c->is_starving = 0;
+	c->aim0.num_buffers = c->cfg.num_buffers / 2;
+	c->aim1.num_buffers = c->cfg.num_buffers - c->aim0.num_buffers;
+	atomic_set(&c->mbo_ref, num_buffer);
+
+out:
+	if (aim == c->aim0.ptr)
+		c->aim0.refs++;
+	if (aim == c->aim1.ptr)
+		c->aim1.refs++;
+	mutex_unlock(&c->start_mutex);
+	return 0;
+
+error:
+	module_put(iface->mod);
+	mutex_unlock(&c->start_mutex);
+	return ret;
+}
+EXPORT_SYMBOL_GPL(most_start_channel);
+
+/**
+ * most_stop_channel - stops a running channel
+ * @iface: pointer to interface instance
+ * @id: channel ID
+ */
+int most_stop_channel(struct most_interface *iface, int id,
+		      struct most_aim *aim)
+{
+	struct most_c_obj *c;
+
+	if (unlikely((!iface) || (id >= iface->num_channels) || (id < 0))) {
+		pr_err("Bad interface or index out of range\n");
+		return -EINVAL;
+	}
+	c = get_channel_by_iface(iface, id);
+	if (unlikely(!c))
+		return -EINVAL;
+
+	mutex_lock(&c->start_mutex);
+	if (c->aim0.refs + c->aim1.refs >= 2)
+		goto out;
+
+	if (c->hdm_enqueue_task)
+		kthread_stop(c->hdm_enqueue_task);
+	c->hdm_enqueue_task = NULL;
+
+	if (iface->mod)
+		module_put(iface->mod);
+
+	c->is_poisoned = true;
+	if (c->iface->poison_channel(c->iface, c->channel_id)) {
+		pr_err("Cannot stop channel %d of mdev %s\n", c->channel_id,
+		       c->iface->description);
+		mutex_unlock(&c->start_mutex);
+		return -EAGAIN;
+	}
+	flush_trash_fifo(c);
+	flush_channel_fifos(c);
+
+#ifdef CMPL_INTERRUPTIBLE
+	if (wait_for_completion_interruptible(&c->cleanup)) {
+		pr_info("Interrupted while clean up ch %d\n", c->channel_id);
+		mutex_unlock(&c->start_mutex);
+		return -EINTR;
+	}
+#else
+	wait_for_completion(&c->cleanup);
+#endif
+	c->is_poisoned = false;
+
+out:
+	if (aim == c->aim0.ptr)
+		c->aim0.refs--;
+	if (aim == c->aim1.ptr)
+		c->aim1.refs--;
+	mutex_unlock(&c->start_mutex);
+	return 0;
+}
+EXPORT_SYMBOL_GPL(most_stop_channel);
+
+/**
+ * most_register_aim - registers an AIM (driver) with the core
+ * @aim: instance of AIM to be registered
+ */
+int most_register_aim(struct most_aim *aim)
+{
+	struct most_aim_obj *aim_obj;
+
+	if (!aim) {
+		pr_err("Bad driver\n");
+		return -EINVAL;
+	}
+	aim_obj = create_most_aim_obj(aim->name);
+	if (!aim_obj) {
+		pr_info("failed to alloc driver object\n");
+		return -ENOMEM;
+	}
+	aim_obj->driver = aim;
+	aim->context = aim_obj;
+	pr_info("registered new application interfacing module %s\n",
+		aim->name);
+	list_add_tail(&aim_obj->list, &aim_list);
+	return 0;
+}
+EXPORT_SYMBOL_GPL(most_register_aim);
+
+/**
+ * most_deregister_aim - deregisters an AIM (driver) with the core
+ * @aim: AIM to be removed
+ */
+int most_deregister_aim(struct most_aim *aim)
+{
+	struct most_aim_obj *aim_obj;
+	struct most_c_obj *c, *tmp;
+	struct most_inst_obj *i, *i_tmp;
+
+	if (!aim) {
+		pr_err("Bad driver\n");
+		return -EINVAL;
+	}
+
+	aim_obj = aim->context;
+	if (!aim_obj) {
+		pr_info("driver not registered.\n");
+		return -EINVAL;
+	}
+	list_for_each_entry_safe(i, i_tmp, &instance_list, list) {
+		list_for_each_entry_safe(c, tmp, &i->channel_list, list) {
+			if (c->aim0.ptr == aim || c->aim1.ptr == aim)
+				aim->disconnect_channel(
+					c->iface, c->channel_id);
+			if (c->aim0.ptr == aim)
+				c->aim0.ptr = NULL;
+			if (c->aim1.ptr == aim)
+				c->aim1.ptr = NULL;
+		}
+	}
+	list_del(&aim_obj->list);
+	destroy_most_aim_obj(aim_obj);
+	pr_info("deregistering application interfacing module %s\n", aim->name);
+	return 0;
+}
+EXPORT_SYMBOL_GPL(most_deregister_aim);
+
+/**
+ * most_register_interface - registers an interface with core
+ * @iface: pointer to the instance of the interface description.
+ *
+ * Allocates and initializes a new interface instance and all of its channels.
+ * Returns a pointer to kobject or an error pointer.
+ */
+struct kobject *most_register_interface(struct most_interface *iface)
+{
+	unsigned int i;
+	int id;
+	char name[STRING_SIZE];
+	char channel_name[STRING_SIZE];
+	struct most_c_obj *c;
+	struct most_inst_obj *inst;
+
+	if (!iface || !iface->enqueue || !iface->configure ||
+	    !iface->poison_channel || (iface->num_channels > MAX_CHANNELS)) {
+		pr_err("Bad interface or channel overflow\n");
+		return ERR_PTR(-EINVAL);
+	}
+
+	id = ida_simple_get(&mdev_id, 0, 0, GFP_KERNEL);
+	if (id < 0) {
+		pr_info("Failed to alloc mdev ID\n");
+		return ERR_PTR(id);
+	}
+	snprintf(name, STRING_SIZE, "mdev%d", id);
+
+	inst = create_most_inst_obj(name);
+	if (!inst) {
+		pr_info("Failed to allocate interface instance\n");
+		ida_simple_remove(&mdev_id, id);
+		return ERR_PTR(-ENOMEM);
+	}
+
+	iface->priv = inst;
+	INIT_LIST_HEAD(&inst->channel_list);
+	inst->iface = iface;
+	inst->dev_id = id;
+	list_add_tail(&inst->list, &instance_list);
+
+	for (i = 0; i < iface->num_channels; i++) {
+		const char *name_suffix = iface->channel_vector[i].name_suffix;
+
+		if (!name_suffix)
+			snprintf(channel_name, STRING_SIZE, "ch%d", i);
+		else
+			snprintf(channel_name, STRING_SIZE, "%s", name_suffix);
+
+		/* this increments the reference count of this instance */
+		c = create_most_c_obj(channel_name, &inst->kobj);
+		if (!c)
+			goto free_instance;
+		inst->channel[i] = c;
+		c->is_starving = 0;
+		c->iface = iface;
+		c->inst = inst;
+		c->channel_id = i;
+		c->keep_mbo = false;
+		c->enqueue_halt = false;
+		c->is_poisoned = false;
+		c->cfg.direction = 0;
+		c->cfg.data_type = 0;
+		c->cfg.num_buffers = 0;
+		c->cfg.buffer_size = 0;
+		c->cfg.subbuffer_size = 0;
+		c->cfg.packets_per_xact = 0;
+		spin_lock_init(&c->fifo_lock);
+		INIT_LIST_HEAD(&c->fifo);
+		INIT_LIST_HEAD(&c->trash_fifo);
+		INIT_LIST_HEAD(&c->halt_fifo);
+		init_completion(&c->cleanup);
+		atomic_set(&c->mbo_ref, 0);
+		mutex_init(&c->start_mutex);
+		mutex_init(&c->nq_mutex);
+		list_add_tail(&c->list, &inst->channel_list);
+	}
+	pr_info("registered new MOST device mdev%d (%s)\n",
+		inst->dev_id, iface->description);
+	return &inst->kobj;
+
+free_instance:
+	pr_info("Failed allocate channel(s)\n");
+	list_del(&inst->list);
+	ida_simple_remove(&mdev_id, id);
+	destroy_most_inst_obj(inst);
+	return ERR_PTR(-ENOMEM);
+}
+EXPORT_SYMBOL_GPL(most_register_interface);
+
+/**
+ * most_deregister_interface - deregisters an interface with core
+ * @iface: pointer to the interface instance description.
+ *
+ * Before removing an interface instance from the list, all running
+ * channels are stopped and poisoned.
+ */
+void most_deregister_interface(struct most_interface *iface)
+{
+	struct most_inst_obj *i = iface->priv;
+	struct most_c_obj *c;
+
+	if (unlikely(!i)) {
+		pr_info("Bad Interface\n");
+		return;
+	}
+	pr_info("deregistering MOST device %s (%s)\n", i->kobj.name,
+		iface->description);
+
+	list_for_each_entry(c, &i->channel_list, list) {
+		if (c->aim0.ptr)
+			c->aim0.ptr->disconnect_channel(c->iface,
+							c->channel_id);
+		if (c->aim1.ptr)
+			c->aim1.ptr->disconnect_channel(c->iface,
+							c->channel_id);
+		c->aim0.ptr = NULL;
+		c->aim1.ptr = NULL;
+	}
+
+	ida_simple_remove(&mdev_id, i->dev_id);
+	list_del(&i->list);
+	destroy_most_inst_obj(i);
+}
+EXPORT_SYMBOL_GPL(most_deregister_interface);
+
+/**
+ * most_stop_enqueue - prevents core from enqueueing MBOs
+ * @iface: pointer to interface
+ * @id: channel id
+ *
+ * This is called by an HDM that _cannot_ attend to its duties and
+ * is about to be overrun by the core. The core will not
+ * enqueue any further packets unless the flagging HDM calls
+ * most_resume_enqueue().
+ */
+void most_stop_enqueue(struct most_interface *iface, int id)
+{
+	struct most_c_obj *c = get_channel_by_iface(iface, id);
+
+	if (!c)
+		return;
+
+	mutex_lock(&c->nq_mutex);
+	c->enqueue_halt = true;
+	mutex_unlock(&c->nq_mutex);
+}
+EXPORT_SYMBOL_GPL(most_stop_enqueue);
+
+/**
+ * most_resume_enqueue - allow core to enqueue MBOs again
+ * @iface: pointer to interface
+ * @id: channel id
+ *
+ * This clears the enqueue halt flag and enqueues all MBOs currently
+ * sitting in the wait fifo.
+ */
+void most_resume_enqueue(struct most_interface *iface, int id)
+{
+	struct most_c_obj *c = get_channel_by_iface(iface, id);
+
+	if (!c)
+		return;
+
+	mutex_lock(&c->nq_mutex);
+	c->enqueue_halt = false;
+	mutex_unlock(&c->nq_mutex);
+
+	wake_up_interruptible(&c->hdm_fifo_wq);
+}
+EXPORT_SYMBOL_GPL(most_resume_enqueue);
+
+static int __init most_init(void)
+{
+	int err;
+
+	pr_info("init()\n");
+	INIT_LIST_HEAD(&instance_list);
+	INIT_LIST_HEAD(&aim_list);
+	ida_init(&mdev_id);
+
+	err = bus_register(&most_bus);
+	if (err) {
+		pr_info("Cannot register most bus\n");
+		return err;
+	}
+
+	most_class = class_create(THIS_MODULE, "most");
+	if (IS_ERR(most_class)) {
+		pr_info("No udev support.\n");
+		err = PTR_ERR(most_class);
+		goto exit_bus;
+	}
+
+	err = driver_register(&mostcore);
+	if (err) {
+		pr_info("Cannot register core driver\n");
+		goto exit_class;
+	}
+
+	core_dev = device_create(most_class, NULL, 0, NULL, "mostcore");
+	if (IS_ERR(core_dev)) {
+		err = PTR_ERR(core_dev);
+		goto exit_driver;
+	}
+
+	most_aim_kset = kset_create_and_add("aims", NULL, &core_dev->kobj);
+	if (!most_aim_kset) {
+		err = -ENOMEM;
+		goto exit_class_container;
+	}
+
+	most_inst_kset = kset_create_and_add("devices", NULL, &core_dev->kobj);
+	if (!most_inst_kset) {
+		err = -ENOMEM;
+		goto exit_driver_kset;
+	}
+
+	return 0;
+
+exit_driver_kset:
+	kset_unregister(most_aim_kset);
+exit_class_container:
+	device_destroy(most_class, 0);
+exit_driver:
+	driver_unregister(&mostcore);
+exit_class:
+	class_destroy(most_class);
+exit_bus:
+	bus_unregister(&most_bus);
+	return err;
+}
+
+static void __exit most_exit(void)
+{
+	struct most_inst_obj *i, *i_tmp;
+	struct most_aim_obj *d, *d_tmp;
+
+	pr_info("exit core module\n");
+	list_for_each_entry_safe(d, d_tmp, &aim_list, list) {
+		destroy_most_aim_obj(d);
+	}
+
+	list_for_each_entry_safe(i, i_tmp, &instance_list, list) {
+		list_del(&i->list);
+		destroy_most_inst_obj(i);
+	}
+	kset_unregister(most_inst_kset);
+	kset_unregister(most_aim_kset);
+	device_destroy(most_class, 0);
+	driver_unregister(&mostcore);
+	class_destroy(most_class);
+	bus_unregister(&most_bus);
+	ida_destroy(&mdev_id);
+}
+
+module_init(most_init);
+module_exit(most_exit);
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Christian Gromm <christian.gromm@xxxxxxxxxxxxx>");
+MODULE_DESCRIPTION("Core module of stacked MOST Linux driver");
diff --git a/drivers/staging/most/core.h b/drivers/staging/most/core.h
new file mode 100644
index 0000000..a6e618c
--- /dev/null
+++ b/drivers/staging/most/core.h
@@ -0,0 +1,325 @@
+/*
+ * core.h - Interface between MostCore,
+ *   Hardware Dependent Module (HDM) and Application Interface Module (AIM).
+ *
+ * Copyright (C) 2013-2015, Microchip Technology Germany II GmbH & Co. KG
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * This file is licensed under GPLv2.
+ */
+
+/*
+ * Authors:
+ *   Andrey Shvetsov <andrey.shvetsov@xxxxxx>
+ *   Christian Gromm <christian.gromm@xxxxxxxxxxxxx>
+ *   Sebastian Graf
+ */
+
+#ifndef __MOST_CORE_H__
+#define __MOST_CORE_H__
+
+#include <linux/types.h>
+
+struct kobject;
+struct module;
+
+/**
+ * Interface type
+ */
+enum most_interface_type {
+	ITYPE_LOOPBACK = 1,
+	ITYPE_I2C,
+	ITYPE_I2S,
+	ITYPE_TSI,
+	ITYPE_HBI,
+	ITYPE_MEDIALB_DIM,
+	ITYPE_MEDIALB_DIM2,
+	ITYPE_USB,
+	ITYPE_PCIE
+};
+
+/**
+ * Channel direction.
+ */
+enum most_channel_direction {
+	MOST_CH_RX = 1 << 0,
+	MOST_CH_TX = 1 << 1,
+};
+
+/**
+ * Channel data type.
+ */
+enum most_channel_data_type {
+	MOST_CH_CONTROL = 1 << 0,
+	MOST_CH_ASYNC = 1 << 1,
+	MOST_CH_ISOC = 1 << 2,
+	MOST_CH_SYNC = 1 << 5,
+};
+
+enum mbo_status_flags {
+	/* MBO was processed successfully (data was sent or received) */
+	MBO_SUCCESS = 0,
+	/* The MBO contains wrong or missing information.  */
+	MBO_E_INVAL,
+	/* MBO was completed as HDM Channel will be closed */
+	MBO_E_CLOSE,
+};
+
+/**
+ * struct most_channel_capability - Channel capability
+ * @direction: Supported channel directions.
+ * The value is a bitwise OR combination of the values from the
+ * enumeration most_channel_direction. Zero is an allowed value and means
+ * "channel may not be used".
+ * @data_type: Supported channel data types.
+ * The value is a bitwise OR combination of the values from the
+ * enumeration most_channel_data_type. Zero is an allowed value and means
+ * "channel may not be used".
+ * @num_buffers_packet: Maximum number of buffers supported by this channel
+ * for packet data types (Async, Control, QoS)
+ * @buffer_size_packet: Maximum buffer size supported by this channel
+ * for packet data types (Async, Control, QoS)
+ * @num_buffers_streaming: Maximum number of buffers supported by this channel
+ * for streaming data types (Sync, AV Packetized)
+ * @buffer_size_streaming: Maximum buffer size supported by this channel
+ * for streaming data types (Sync, AV Packetized)
+ * @name_suffix: Optional suffix provided by an HDM that is attached to the
+ * regular channel name.
+ *
+ * Describes the capabilities of a MostCore channel like supported Data Types
+ * and directions. This information is provided by an HDM for the MostCore.
+ *
+ * The Core creates read only sysfs attribute files in
+ * /sys/devices/virtual/most/mostcore/devices/mdev#/ch#/ with the
+ * following attributes:
+ *	-available_directions
+ *	-available_datatypes
+ *	-number_of_packet_buffers
+ *	-number_of_stream_buffers
+ *	-size_of_packet_buffer
+ *	-size_of_stream_buffer
+ * where the content of each file is a string listing all supported properties
+ * of the respective channel attribute.
+ */
+struct most_channel_capability {
+	u16 direction;
+	u16 data_type;
+	u16 num_buffers_packet;
+	u16 buffer_size_packet;
+	u16 num_buffers_streaming;
+	u16 buffer_size_streaming;
+	const char *name_suffix;
+};
+
+/**
+ * struct most_channel_config - stores channel configuration
+ * @direction: direction of the channel
+ * @data_type: data type travelling over this channel
+ * @num_buffers: number of buffers
+ * @buffer_size: size of a buffer for AIM.
+ * The buffer size may be cut down by the HDM in a configure callback
+ * to match a given interface and channel type.
+ * @extra_len: additional buffer space for internal HDM purposes like padding.
+ * May be set by the HDM in a configure callback if needed.
+ * @subbuffer_size: size of a subbuffer
+ * @packets_per_xact: number of MOST frames that are packed inside one USB
+ *		      packet. This is USB specific.
+ *
+ * Describes the configuration for a MostCore channel. This information is
+ * provided by the MostCore to an HDM (like the Medusa PCIe Interface) as a
+ * parameter of the "configure" function call.
+ */
+struct most_channel_config {
+	enum most_channel_direction direction;
+	enum most_channel_data_type data_type;
+	u16 num_buffers;
+	u16 buffer_size;
+	u16 extra_len;
+	u16 subbuffer_size;
+	u16 packets_per_xact;
+};
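+
+/*
+ * A minimal sketch (hypothetical HDM code, not part of this patch) of how a
+ * configure() callback may adjust the configuration described above: the HDM
+ * may cut down buffer_size and request extra_len for padding.
+ *
+ *	static int my_configure(struct most_interface *iface, int ch_idx,
+ *				struct most_channel_config *cfg)
+ *	{
+ *		if (cfg->buffer_size > MY_HW_LIMIT)
+ *			cfg->buffer_size = MY_HW_LIMIT;
+ *		cfg->extra_len = MY_PADDING;
+ *		return 0;
+ *	}
+ *
+ * MY_HW_LIMIT and MY_PADDING are placeholders for hardware-specific values.
+ */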
+
+/*
+ * struct mbo - MOST Buffer Object.
+ * @context: context for core completion handler
+ * @priv: private data for HDM
+ *
+ *	public: documented fields that are used for the communication
+ *	between MostCore and HDMs
+ *
+ * @list: list head for use by the mbo's current owner
+ * @ifp: (in) associated interface instance
+ * @hdm_channel_id: (in) HDM channel instance
+ * @virt_address: (in) kernel virtual address of the buffer
+ * @bus_address: (in) bus address of the buffer
+ * @buffer_length: (in) buffer payload length
+ * @processed_length: (out) processed length
+ * @status: (out) transfer status
+ * @complete: (in) completion routine
+ *
+ * The MostCore allocates and initializes the MBO.
+ *
+ * The HDM receives MBO for transfer from MostCore with the call to enqueue().
+ * The HDM copies the data to or from the buffer depending on the configured
+ * channel direction, sets "processed_length" and "status", and completes
+ * the transfer procedure by calling the completion routine.
+ *
+ * At the end the MostCore deallocates the MBO or recycles it for further
+ * transfers for the same or different HDM.
+ *
+ * Directions of usage:
+ *					I.
+ * The core driver should never access any MBO fields (even if marked
+ * as "public") while the MBO is owned by an HDM. The ownership starts with
+ * the call of enqueue() and ends with the call of its complete() routine.
+ *
+ *					II.
+ * Every HDM attached to the core driver _must_ ensure that it returns any MBO
+ * it owns (due to a previous call to enqueue() by the core driver) before it
+ * de-registers an interface or gets unloaded from the kernel. If this
+ * direction is violated, memory leaks will occur, since the core driver does
+ * _not_ track MBOs it is currently not in control of.
+ *
+ */
+struct mbo {
+	void *context;
+	void *priv;
+	struct list_head list;
+	struct most_interface *ifp;
+	int *num_buffers_ptr;
+	u16 hdm_channel_id;
+	void *virt_address;
+	dma_addr_t bus_address;
+	u16 buffer_length;
+	u16 processed_length;
+	enum mbo_status_flags status;
+	void (*complete)(struct mbo *);
+};
+
+/**
+ * Interface instance description.
+ *
+ * Describes one instance of an interface like Medusa PCIe or Vantage USB.
+ * This structure is allocated and initialized in the HDM. MostCore may not
+ * modify this structure.
+ *
+ * @interface Interface type. \sa most_interface_type.
+ * @description PRELIMINARY.
+ *   Unique description of the device instance from the point of view of the
+ *   interface in free text form (ASCII).
+ *   It may be a hexadecimal presentation of the memory address for the MediaLB
+ *   IP or USB device ID with USB properties for USB interface, etc.
+ * @num_channels Number of channels and size of the channel_vector.
+ * @channel_vector Properties of the channels.
+ *   The array index is used as the channel ID by the driver.
+ * @configure Callback to change the data type for a channel of the
+ *   interface instance. May be zero if the instance of the interface is not
+ *   configurable. The parameter channel_config describes the direction and
+ *   data type for the channel, as configured by the higher level.
+ * @enqueue Delivers MBO to the HDM for processing.
+ *   After HDM completes Rx- or Tx- operation the processed MBO shall
+ *   be returned back to the MostCore using completion routine.
+ *   The reason to get the MBO delivered from the MostCore after the channel
+ *   is poisoned is the re-opening of the channel by the application.
+ *   In this case the HDM shall hold MBOs and service the channel as usual.
+ *   The HDM must be able to hold at least one MBO for each channel.
+ *   The callback returns a negative value on error, otherwise 0.
+ * @poison_channel Informs the HDM about closing the channel. The HDM shall
+ *   cancel all transfers and synchronously or asynchronously return all
+ *   MBOs enqueued for this channel using the completion routine.
+ *   The callback returns a negative value on error, otherwise 0.
+ * @request_netinfo: triggers retrieving of network info from the HDM by
+ *   means of "Message exchange over MDP/MEP"
+ *   The call of the function request_netinfo with the parameter on_netinfo as
+ *   NULL prohibits use of the previously obtained function pointer.
+ * @priv Private field used by mostcore to store context information.
+ */
+struct most_interface {
+	struct module *mod;
+	enum most_interface_type interface;
+	const char *description;
+	int num_channels;
+	struct most_channel_capability *channel_vector;
+	int (*configure)(struct most_interface *iface, int channel_idx,
+			 struct most_channel_config *channel_config);
+	int (*enqueue)(struct most_interface *iface, int channel_idx,
+		       struct mbo *mbo);
+	int (*poison_channel)(struct most_interface *iface, int channel_idx);
+	void (*request_netinfo)(struct most_interface *iface, int channel_idx,
+				void (*on_netinfo)(struct most_interface *iface,
+						   unsigned char link_stat,
+						   unsigned char *mac_addr));
+	void *priv;
+};
+
+/**
+ * struct most_aim - identifies MOST device driver to mostcore
+ * @name: Driver name
+ * @probe_channel: function for core to notify driver about channel connection
+ * @disconnect_channel: callback function to disconnect a certain channel
+ * @rx_completion: completion handler for received packets
+ * @tx_completion: completion handler for transmitted packets
+ * @context: context pointer to be used by mostcore
+ */
+struct most_aim {
+	const char *name;
+	int (*probe_channel)(struct most_interface *iface, int channel_idx,
+			     struct most_channel_config *cfg,
+			     struct kobject *parent, char *name);
+	int (*disconnect_channel)(struct most_interface *iface,
+				  int channel_idx);
+	int (*rx_completion)(struct mbo *mbo);
+	int (*tx_completion)(struct most_interface *iface, int channel_idx);
+	void *context;
+};
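+
+/*
+ * A minimal registration sketch (illustrative only; the callbacks are
+ * assumed to be implemented by a hypothetical AIM module):
+ *
+ *	static struct most_aim my_aim = {
+ *		.name = "my_aim",
+ *		.probe_channel = my_probe_channel,
+ *		.disconnect_channel = my_disconnect_channel,
+ *		.rx_completion = my_rx_completion,
+ *		.tx_completion = my_tx_completion,
+ *	};
+ *
+ *	ret = most_register_aim(&my_aim);	(typically in module init)
+ *	most_deregister_aim(&my_aim);		(typically in module exit)
+ */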
+
+/**
+ * most_register_interface - Registers instance of the interface.
+ * @iface: Pointer to the interface instance description.
+ *
+ * Returns a pointer to the kobject of the generated instance.
+ *
+ * Note: HDM has to ensure that any reference held on the kobj is
+ * released before deregistering the interface.
+ */
+struct kobject *most_register_interface(struct most_interface *iface);
+
+/**
+ * most_deregister_interface - Deregisters an instance of the interface.
+ * @iface: Pointer to the interface instance description.
+ */
+void most_deregister_interface(struct most_interface *iface);
+void most_submit_mbo(struct mbo *mbo);
+
+/**
+ * most_stop_enqueue - prevents core from enqueueing MBOs
+ * @iface: pointer to interface
+ * @channel_idx: channel index
+ */
+void most_stop_enqueue(struct most_interface *iface, int channel_idx);
+
+/**
+ * most_resume_enqueue - allow core to enqueue MBOs again
+ * @iface: pointer to interface
+ * @channel_idx: channel index
+ *
+ * This clears the enqueue halt flag and enqueues all MBOs currently
+ * in wait fifo.
+ */
+void most_resume_enqueue(struct most_interface *iface, int channel_idx);
+int most_register_aim(struct most_aim *aim);
+int most_deregister_aim(struct most_aim *aim);
+struct mbo *most_get_mbo(struct most_interface *iface, int channel_idx,
+			 struct most_aim *);
+void most_put_mbo(struct mbo *mbo);
+int channel_has_mbo(struct most_interface *iface, int channel_idx,
+		    struct most_aim *aim);
+int most_start_channel(struct most_interface *iface, int channel_idx,
+		       struct most_aim *);
+int most_stop_channel(struct most_interface *iface, int channel_idx,
+		      struct most_aim *);
+
+#endif /* __MOST_CORE_H__ */
diff --git a/drivers/staging/most/dim2/Kconfig b/drivers/staging/most/dim2/Kconfig
new file mode 100644
index 0000000..e39c4e5
--- /dev/null
+++ b/drivers/staging/most/dim2/Kconfig
@@ -0,0 +1,16 @@
+#
+# MediaLB configuration
+#
+
+config MOST_DIM2
+	tristate "DIM2"
+	depends on HAS_IOMEM
+
+	---help---
+	  Say Y here if you want to connect via MediaLB to a network transceiver.
+	  This device driver is platform dependent and needs an additional
+	  platform driver to be installed. For more information, contact the
+	  maintainer of this driver.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called most_dim2.
diff --git a/drivers/staging/most/dim2/Makefile b/drivers/staging/most/dim2/Makefile
new file mode 100644
index 0000000..66676f5
--- /dev/null
+++ b/drivers/staging/most/dim2/Makefile
@@ -0,0 +1,4 @@
+obj-$(CONFIG_MOST_DIM2) += most_dim2.o
+
+most_dim2-objs := dim2.o hal.o sysfs.o
+ccflags-y += -Idrivers/staging/
diff --git a/drivers/staging/most/dim2/dim2.c b/drivers/staging/most/dim2/dim2.c
new file mode 100644
index 0000000..cc8563f
--- /dev/null
+++ b/drivers/staging/most/dim2/dim2.c
@@ -0,0 +1,918 @@
+/*
+ * dim2.c - MediaLB DIM2 Hardware Dependent Module
+ *
+ * Copyright (C) 2015-2016, Microchip Technology Germany II GmbH & Co. KG
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * This file is licensed under GPLv2.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/module.h>
+#include <linux/printk.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/interrupt.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <linux/dma-mapping.h>
+#include <linux/sched.h>
+#include <linux/kthread.h>
+
+#include <most/core.h>
+#include "hal.h"
+#include "dim2.h"
+#include "errors.h"
+#include "sysfs.h"
+
+#define DMA_CHANNELS (32 - 1)  /* channel 0 is a system channel */
+
+#define MAX_BUFFERS_PACKET      32
+#define MAX_BUFFERS_STREAMING   32
+#define MAX_BUF_SIZE_PACKET     2048
+#define MAX_BUF_SIZE_STREAMING  (8 * 1024)
+
+/* command line parameter to select clock speed */
+static char *clock_speed;
+module_param(clock_speed, charp, 0000);
+MODULE_PARM_DESC(clock_speed, "MediaLB Clock Speed");
+
+/*
+ * The parameter representing the number of frames per sub-buffer for
+ * synchronous channels.  Valid values: [0 .. 6].
+ *
+ * The values 0, 1, 2, 3, 4, 5, 6 represent corresponding number of frames per
+ * sub-buffer 1, 2, 4, 8, 16, 32, 64.
+ */
+static u8 fcnt = 4;  /* (1 << fcnt) frames per subbuffer */
+module_param(fcnt, byte, 0000);
+MODULE_PARM_DESC(fcnt, "Num of frames per sub-buffer for sync channels as a power of 2");
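+
+/*
+ * For illustration, both module parameters can be set at load time, e.g.:
+ *
+ *	modprobe most_dim2 clock_speed=1024fs fcnt=4
+ *
+ * (module name as per this patch's Makefile/Kconfig; the values shown are
+ * only an example)
+ */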
+
+static DEFINE_SPINLOCK(dim_lock);
+
+static void dim2_tasklet_fn(unsigned long data);
+static DECLARE_TASKLET(dim2_tasklet, dim2_tasklet_fn, 0);
+
+/**
+ * struct hdm_channel - private structure to keep channel specific data
+ * @is_initialized: identifier to know whether the channel is initialized
+ * @ch: HAL specific channel data
+ * @pending_list: list to keep MBO's before starting transfer
+ * @started_list: list to keep MBO's after starting transfer
+ * @direction: channel direction (TX or RX)
+ * @data_type: channel data type
+ */
+struct hdm_channel {
+	char name[sizeof "caNNN"];
+	bool is_initialized;
+	struct dim_channel ch;
+	struct list_head pending_list;	/* before dim_enqueue_buffer() */
+	struct list_head started_list;	/* after dim_enqueue_buffer() */
+	enum most_channel_direction direction;
+	enum most_channel_data_type data_type;
+};
+
+/**
+ * struct dim2_hdm - private structure to keep interface specific data
+ * @hch: an array of channel specific data
+ * @most_iface: most interface structure
+ * @capabilities: an array of channel capability data
+ * @io_base: I/O register base address
+ * @clk_speed: user selectable (through command line parameter) clock speed
+ * @netinfo_task: thread to deliver network status
+ * @netinfo_waitq: waitq for the thread to sleep
+ * @deliver_netinfo: to identify whether network status received
+ * @mac_addrs: INIC mac address
+ * @link_state: network link state
+ * @atx_idx: index of async tx channel
+ */
+struct dim2_hdm {
+	struct hdm_channel hch[DMA_CHANNELS];
+	struct most_channel_capability capabilities[DMA_CHANNELS];
+	struct most_interface most_iface;
+	char name[16 + sizeof "dim2-"];
+	void __iomem *io_base;
+	int clk_speed;
+	struct task_struct *netinfo_task;
+	wait_queue_head_t netinfo_waitq;
+	int deliver_netinfo;
+	unsigned char mac_addrs[6];
+	unsigned char link_state;
+	int atx_idx;
+	struct medialb_bus bus;
+	void (*on_netinfo)(struct most_interface *,
+			   unsigned char, unsigned char *);
+};
+
+#define iface_to_hdm(iface) container_of(iface, struct dim2_hdm, most_iface)
+
+/* Macro to identify a network status message */
+#define PACKET_IS_NET_INFO(p)  \
+	(((p)[1] == 0x18) && ((p)[2] == 0x05) && ((p)[3] == 0x0C) && \
+	 ((p)[13] == 0x3C) && ((p)[14] == 0x00) && ((p)[15] == 0x0A))
+
+bool dim2_sysfs_get_state_cb(void)
+{
+	bool state;
+	unsigned long flags;
+
+	spin_lock_irqsave(&dim_lock, flags);
+	state = dim_get_lock_state();
+	spin_unlock_irqrestore(&dim_lock, flags);
+
+	return state;
+}
+
+/**
+ * dimcb_io_read - callback from HAL to read an I/O register
+ * @ptr32: register address
+ */
+u32 dimcb_io_read(u32 __iomem *ptr32)
+{
+	return readl(ptr32);
+}
+
+/**
+ * dimcb_io_write - callback from HAL to write value to an I/O register
+ * @ptr32: register address
+ * @value: value to write
+ */
+void dimcb_io_write(u32 __iomem *ptr32, u32 value)
+{
+	writel(value, ptr32);
+}
+
+/**
+ * dimcb_on_error - callback from HAL to report miscommunication between
+ * HDM and HAL
+ * @error_id: Error ID
+ * @error_message: Error message. Some text in a free format
+ */
+void dimcb_on_error(u8 error_id, const char *error_message)
+{
+	pr_err("dimcb_on_error: error_id - %d, error_message - %s\n", error_id,
+	       error_message);
+}
+
+/**
+ * startup_dim - initialize the dim2 interface
+ * @pdev: platform device
+ *
+ * Get the value of command line parameter "clock_speed" if given or use the
+ * default value, enable the clock and PLL, and initialize the dim2 interface.
+ */
+static int startup_dim(struct platform_device *pdev)
+{
+	struct dim2_hdm *dev = platform_get_drvdata(pdev);
+	struct dim2_platform_data *pdata = pdev->dev.platform_data;
+	u8 hal_ret;
+
+	dev->clk_speed = -1;
+
+	if (clock_speed) {
+		if (!strcmp(clock_speed, "256fs"))
+			dev->clk_speed = CLK_256FS;
+		else if (!strcmp(clock_speed, "512fs"))
+			dev->clk_speed = CLK_512FS;
+		else if (!strcmp(clock_speed, "1024fs"))
+			dev->clk_speed = CLK_1024FS;
+		else if (!strcmp(clock_speed, "2048fs"))
+			dev->clk_speed = CLK_2048FS;
+		else if (!strcmp(clock_speed, "3072fs"))
+			dev->clk_speed = CLK_3072FS;
+		else if (!strcmp(clock_speed, "4096fs"))
+			dev->clk_speed = CLK_4096FS;
+		else if (!strcmp(clock_speed, "6144fs"))
+			dev->clk_speed = CLK_6144FS;
+		else if (!strcmp(clock_speed, "8192fs"))
+			dev->clk_speed = CLK_8192FS;
+	}
+
+	if (dev->clk_speed == -1) {
+		pr_info("Bad or missing clock speed parameter, using default value: 3072fs\n");
+		dev->clk_speed = CLK_3072FS;
+	} else {
+		pr_info("Selected clock speed: %s\n", clock_speed);
+	}
+	if (pdata && pdata->init) {
+		int ret = pdata->init(pdata, dev->io_base, dev->clk_speed);
+
+		if (ret)
+			return ret;
+	}
+
+	pr_info("sync: num of frames per sub-buffer: %u\n", fcnt);
+	hal_ret = dim_startup(dev->io_base, dev->clk_speed, fcnt);
+	if (hal_ret != DIM_NO_ERROR) {
+		pr_err("dim_startup failed: %d\n", hal_ret);
+		if (pdata && pdata->destroy)
+			pdata->destroy(pdata);
+		return -ENODEV;
+	}
+
+	return 0;
+}
+
+/**
+ * try_start_dim_transfer - try to transfer a buffer on a channel
+ * @hdm_ch: channel specific data
+ *
+ * Transfer a buffer from pending_list if the channel is ready
+ */
+static int try_start_dim_transfer(struct hdm_channel *hdm_ch)
+{
+	u16 buf_size;
+	struct list_head *head = &hdm_ch->pending_list;
+	struct mbo *mbo;
+	unsigned long flags;
+	struct dim_ch_state_t st;
+
+	BUG_ON(!hdm_ch);
+	BUG_ON(!hdm_ch->is_initialized);
+
+	spin_lock_irqsave(&dim_lock, flags);
+	if (list_empty(head)) {
+		spin_unlock_irqrestore(&dim_lock, flags);
+		return -EAGAIN;
+	}
+
+	if (!dim_get_channel_state(&hdm_ch->ch, &st)->ready) {
+		spin_unlock_irqrestore(&dim_lock, flags);
+		return -EAGAIN;
+	}
+
+	mbo = list_first_entry(head, struct mbo, list);
+	buf_size = mbo->buffer_length;
+
+	if (dim_dbr_space(&hdm_ch->ch) < buf_size) {
+		spin_unlock_irqrestore(&dim_lock, flags);
+		return -EAGAIN;
+	}
+
+	BUG_ON(mbo->bus_address == 0);
+	if (!dim_enqueue_buffer(&hdm_ch->ch, mbo->bus_address, buf_size)) {
+		list_del(head->next);
+		spin_unlock_irqrestore(&dim_lock, flags);
+		mbo->processed_length = 0;
+		mbo->status = MBO_E_INVAL;
+		mbo->complete(mbo);
+		return -EFAULT;
+	}
+
+	list_move_tail(head->next, &hdm_ch->started_list);
+	spin_unlock_irqrestore(&dim_lock, flags);
+
+	return 0;
+}
+
+/**
+ * deliver_netinfo_thread - thread to deliver network status to mostcore
+ * @data: private data
+ *
+ * Wait for network status and deliver it to mostcore once it is received
+ */
+static int deliver_netinfo_thread(void *data)
+{
+	struct dim2_hdm *dev = data;
+
+	while (!kthread_should_stop()) {
+		wait_event_interruptible(dev->netinfo_waitq,
+					 dev->deliver_netinfo ||
+					 kthread_should_stop());
+
+		if (dev->deliver_netinfo) {
+			dev->deliver_netinfo--;
+			if (dev->on_netinfo) {
+				dev->on_netinfo(&dev->most_iface,
+						dev->link_state,
+						dev->mac_addrs);
+			}
+		}
+	}
+
+	return 0;
+}
+
+/**
+ * retrieve_netinfo - retrieve network status from received buffer
+ * @dev: private data
+ * @mbo: received MBO
+ *
+ * Parse the message in the buffer to get the node address, link state and
+ * MAC address, then wake up the thread that delivers this status to mostcore.
+ */
+static void retrieve_netinfo(struct dim2_hdm *dev, struct mbo *mbo)
+{
+	u8 *data = mbo->virt_address;
+
+	pr_info("Node Address: 0x%03x\n", (u16)data[16] << 8 | data[17]);
+	dev->link_state = data[18];
+	pr_info("NIState: %d\n", dev->link_state);
+	memcpy(dev->mac_addrs, data + 19, 6);
+	dev->deliver_netinfo++;
+	wake_up_interruptible(&dev->netinfo_waitq);
+}
+
+/**
+ * service_done_flag - handle completed buffers
+ * @dev: private data
+ * @ch_idx: channel index
+ *
+ * Return the completed buffers to mostcore, using the completion callback.
+ */
+static void service_done_flag(struct dim2_hdm *dev, int ch_idx)
+{
+	struct hdm_channel *hdm_ch = dev->hch + ch_idx;
+	struct dim_ch_state_t st;
+	struct list_head *head;
+	struct mbo *mbo;
+	int done_buffers;
+	unsigned long flags;
+	u8 *data;
+
+	BUG_ON(!hdm_ch);
+	BUG_ON(!hdm_ch->is_initialized);
+
+	spin_lock_irqsave(&dim_lock, flags);
+
+	done_buffers = dim_get_channel_state(&hdm_ch->ch, &st)->done_buffers;
+	if (!done_buffers) {
+		spin_unlock_irqrestore(&dim_lock, flags);
+		return;
+	}
+
+	if (!dim_detach_buffers(&hdm_ch->ch, done_buffers)) {
+		spin_unlock_irqrestore(&dim_lock, flags);
+		return;
+	}
+	spin_unlock_irqrestore(&dim_lock, flags);
+
+	head = &hdm_ch->started_list;
+
+	while (done_buffers) {
+		spin_lock_irqsave(&dim_lock, flags);
+		if (list_empty(head)) {
+			spin_unlock_irqrestore(&dim_lock, flags);
+			pr_crit("hard error: started_mbo list is empty whereas DIM2 has sent buffers\n");
+			break;
+		}
+
+		mbo = list_first_entry(head, struct mbo, list);
+		list_del(head->next);
+		spin_unlock_irqrestore(&dim_lock, flags);
+
+		data = mbo->virt_address;
+
+		if (hdm_ch->data_type == MOST_CH_ASYNC &&
+		    hdm_ch->direction == MOST_CH_RX &&
+		    PACKET_IS_NET_INFO(data)) {
+			retrieve_netinfo(dev, mbo);
+
+			spin_lock_irqsave(&dim_lock, flags);
+			list_add_tail(&mbo->list, &hdm_ch->pending_list);
+			spin_unlock_irqrestore(&dim_lock, flags);
+		} else {
+			if (hdm_ch->data_type == MOST_CH_CONTROL ||
+			    hdm_ch->data_type == MOST_CH_ASYNC) {
+				u32 const data_size =
+					(u32)data[0] * 256 + data[1] + 2;
+
+				mbo->processed_length =
+					min_t(u32, data_size,
+					      mbo->buffer_length);
+			} else {
+				mbo->processed_length = mbo->buffer_length;
+			}
+			mbo->status = MBO_SUCCESS;
+			mbo->complete(mbo);
+		}
+
+		done_buffers--;
+	}
+}
+
+static struct dim_channel **get_active_channels(struct dim2_hdm *dev,
+						struct dim_channel **buffer)
+{
+	int idx = 0;
+	int ch_idx;
+
+	for (ch_idx = 0; ch_idx < DMA_CHANNELS; ch_idx++) {
+		if (dev->hch[ch_idx].is_initialized)
+			buffer[idx++] = &dev->hch[ch_idx].ch;
+	}
+	buffer[idx++] = NULL;
+
+	return buffer;
+}
+
+static irqreturn_t dim2_mlb_isr(int irq, void *_dev)
+{
+	struct dim2_hdm *dev = _dev;
+	unsigned long flags;
+
+	spin_lock_irqsave(&dim_lock, flags);
+	dim_service_mlb_int_irq();
+	spin_unlock_irqrestore(&dim_lock, flags);
+
+	if (dev->atx_idx >= 0 && dev->hch[dev->atx_idx].is_initialized)
+		while (!try_start_dim_transfer(dev->hch + dev->atx_idx))
+			continue;
+
+	return IRQ_HANDLED;
+}
+
+/**
+ * dim2_tasklet_fn - tasklet function
+ * @data: private data
+ *
+ * Service each initialized channel, if needed
+ */
+static void dim2_tasklet_fn(unsigned long data)
+{
+	struct dim2_hdm *dev = (struct dim2_hdm *)data;
+	unsigned long flags;
+	int ch_idx;
+
+	for (ch_idx = 0; ch_idx < DMA_CHANNELS; ch_idx++) {
+		if (!dev->hch[ch_idx].is_initialized)
+			continue;
+
+		spin_lock_irqsave(&dim_lock, flags);
+		dim_service_channel(&dev->hch[ch_idx].ch);
+		spin_unlock_irqrestore(&dim_lock, flags);
+
+		service_done_flag(dev, ch_idx);
+		while (!try_start_dim_transfer(dev->hch + ch_idx))
+			continue;
+	}
+}
+
+/**
+ * dim2_ahb_isr - interrupt service routine
+ * @irq: irq number
+ * @_dev: private data
+ *
+ * Acknowledge the interrupt and schedule a tasklet to service channels.
+ * Return IRQ_HANDLED.
+ */
+static irqreturn_t dim2_ahb_isr(int irq, void *_dev)
+{
+	struct dim2_hdm *dev = _dev;
+	struct dim_channel *buffer[DMA_CHANNELS + 1];
+	unsigned long flags;
+
+	spin_lock_irqsave(&dim_lock, flags);
+	dim_service_ahb_int_irq(get_active_channels(dev, buffer));
+	spin_unlock_irqrestore(&dim_lock, flags);
+
+	dim2_tasklet.data = (unsigned long)dev;
+	tasklet_schedule(&dim2_tasklet);
+	return IRQ_HANDLED;
+}
+
+/**
+ * complete_all_mbos - complete MBO's in a list
+ * @head: list head
+ *
+ * Delete all entries from the list and return the MBO's to mostcore using the
+ * completion callback.
+ */
+static void complete_all_mbos(struct list_head *head)
+{
+	unsigned long flags;
+	struct mbo *mbo;
+
+	for (;;) {
+		spin_lock_irqsave(&dim_lock, flags);
+		if (list_empty(head)) {
+			spin_unlock_irqrestore(&dim_lock, flags);
+			break;
+		}
+
+		mbo = list_first_entry(head, struct mbo, list);
+		list_del(head->next);
+		spin_unlock_irqrestore(&dim_lock, flags);
+
+		mbo->processed_length = 0;
+		mbo->status = MBO_E_CLOSE;
+		mbo->complete(mbo);
+	}
+}
+
+/**
+ * configure_channel - initialize a channel
+ * @most_iface: interface the channel belongs to
+ * @ch_idx: index of the channel to be configured
+ * @ccfg: structure that holds the configuration information
+ *
+ * Receives configuration information from mostcore and initializes
+ * the corresponding channel. Return 0 on success, negative on failure.
+ */
+static int configure_channel(struct most_interface *most_iface, int ch_idx,
+			     struct most_channel_config *ccfg)
+{
+	struct dim2_hdm *dev = iface_to_hdm(most_iface);
+	bool const is_tx = ccfg->direction == MOST_CH_TX;
+	u16 const sub_size = ccfg->subbuffer_size;
+	u16 const buf_size = ccfg->buffer_size;
+	u16 new_size;
+	unsigned long flags;
+	u8 hal_ret;
+	int const ch_addr = ch_idx * 2 + 2;
+	struct hdm_channel *const hdm_ch = dev->hch + ch_idx;
+
+	BUG_ON(ch_idx < 0 || ch_idx >= DMA_CHANNELS);
+
+	if (hdm_ch->is_initialized)
+		return -EPERM;
+
+	switch (ccfg->data_type) {
+	case MOST_CH_CONTROL:
+		new_size = dim_norm_ctrl_async_buffer_size(buf_size);
+		if (new_size == 0) {
+			pr_err("%s: too small buffer size\n", hdm_ch->name);
+			return -EINVAL;
+		}
+		ccfg->buffer_size = new_size;
+		if (new_size != buf_size)
+			pr_warn("%s: fixed buffer size (%d -> %d)\n",
+				hdm_ch->name, buf_size, new_size);
+		spin_lock_irqsave(&dim_lock, flags);
+		hal_ret = dim_init_control(&hdm_ch->ch, is_tx, ch_addr,
+					   is_tx ? new_size * 2 : new_size);
+		break;
+	case MOST_CH_ASYNC:
+		new_size = dim_norm_ctrl_async_buffer_size(buf_size);
+		if (new_size == 0) {
+			pr_err("%s: too small buffer size\n", hdm_ch->name);
+			return -EINVAL;
+		}
+		ccfg->buffer_size = new_size;
+		if (new_size != buf_size)
+			pr_warn("%s: fixed buffer size (%d -> %d)\n",
+				hdm_ch->name, buf_size, new_size);
+		spin_lock_irqsave(&dim_lock, flags);
+		hal_ret = dim_init_async(&hdm_ch->ch, is_tx, ch_addr,
+					 is_tx ? new_size * 2 : new_size);
+		break;
+	case MOST_CH_ISOC:
+		new_size = dim_norm_isoc_buffer_size(buf_size, sub_size);
+		if (new_size == 0) {
+			pr_err("%s: invalid sub-buffer size or too small buffer size\n",
+			       hdm_ch->name);
+			return -EINVAL;
+		}
+		ccfg->buffer_size = new_size;
+		if (new_size != buf_size)
+			pr_warn("%s: fixed buffer size (%d -> %d)\n",
+				hdm_ch->name, buf_size, new_size);
+		spin_lock_irqsave(&dim_lock, flags);
+		hal_ret = dim_init_isoc(&hdm_ch->ch, is_tx, ch_addr, sub_size);
+		break;
+	case MOST_CH_SYNC:
+		new_size = dim_norm_sync_buffer_size(buf_size, sub_size);
+		if (new_size == 0) {
+			pr_err("%s: invalid sub-buffer size or too small buffer size\n",
+			       hdm_ch->name);
+			return -EINVAL;
+		}
+		ccfg->buffer_size = new_size;
+		if (new_size != buf_size)
+			pr_warn("%s: fixed buffer size (%d -> %d)\n",
+				hdm_ch->name, buf_size, new_size);
+		spin_lock_irqsave(&dim_lock, flags);
+		hal_ret = dim_init_sync(&hdm_ch->ch, is_tx, ch_addr, sub_size);
+		break;
+	default:
+		pr_err("%s: configure failed, bad channel type: %d\n",
+		       hdm_ch->name, ccfg->data_type);
+		return -EINVAL;
+	}
+
+	if (hal_ret != DIM_NO_ERROR) {
+		spin_unlock_irqrestore(&dim_lock, flags);
+		pr_err("%s: configure failed (%d), type: %d, is_tx: %d\n",
+		       hdm_ch->name, hal_ret, ccfg->data_type, (int)is_tx);
+		return -ENODEV;
+	}
+
+	hdm_ch->data_type = ccfg->data_type;
+	hdm_ch->direction = ccfg->direction;
+	hdm_ch->is_initialized = true;
+
+	if (hdm_ch->data_type == MOST_CH_ASYNC &&
+	    hdm_ch->direction == MOST_CH_TX &&
+	    dev->atx_idx < 0)
+		dev->atx_idx = ch_idx;
+
+	spin_unlock_irqrestore(&dim_lock, flags);
+
+	return 0;
+}
+
+/**
+ * enqueue - enqueue a buffer for data transfer
+ * @most_iface: intended interface
+ * @ch_idx: index of the channel the buffer is intended for
+ * @mbo: pointer to the buffer object
+ *
+ * Push the buffer into pending_list and try to transfer one buffer from
+ * pending_list. Return 0 on success, negative on failure.
+ */
+static int enqueue(struct most_interface *most_iface, int ch_idx,
+		   struct mbo *mbo)
+{
+	struct dim2_hdm *dev = iface_to_hdm(most_iface);
+	struct hdm_channel *hdm_ch = dev->hch + ch_idx;
+	unsigned long flags;
+
+	BUG_ON(ch_idx < 0 || ch_idx >= DMA_CHANNELS);
+
+	if (!hdm_ch->is_initialized)
+		return -EPERM;
+
+	if (mbo->bus_address == 0)
+		return -EFAULT;
+
+	spin_lock_irqsave(&dim_lock, flags);
+	list_add_tail(&mbo->list, &hdm_ch->pending_list);
+	spin_unlock_irqrestore(&dim_lock, flags);
+
+	(void)try_start_dim_transfer(hdm_ch);
+
+	return 0;
+}
+
+/**
+ * request_netinfo - triggers retrieving of network info
+ * @most_iface: pointer to the interface
+ * @ch_idx: corresponding channel index
+ * @on_netinfo: callback used to deliver the retrieved network status
+ *
+ * Send a command to INIC which triggers retrieving of network info by means of
+ * "Message exchange over MDP/MEP".
+ */
+static void request_netinfo(struct most_interface *most_iface, int ch_idx,
+			    void (*on_netinfo)(struct most_interface *,
+					       unsigned char, unsigned char *))
+{
+	struct dim2_hdm *dev = iface_to_hdm(most_iface);
+	struct mbo *mbo;
+	u8 *data;
+
+	dev->on_netinfo = on_netinfo;
+	if (!on_netinfo)
+		return;
+
+	if (dev->atx_idx < 0) {
+		pr_err("Async Tx Not initialized\n");
+		return;
+	}
+
+	mbo = most_get_mbo(&dev->most_iface, dev->atx_idx, NULL);
+	if (!mbo)
+		return;
+
+	mbo->buffer_length = 5;
+
+	data = mbo->virt_address;
+
+	data[0] = 0x00; /* PML High byte */
+	data[1] = 0x03; /* PML Low byte */
+	data[2] = 0x02; /* PMHL */
+	data[3] = 0x08; /* FPH */
+	data[4] = 0x40; /* FMF (FIFO cmd msg - Triggers NAOverMDP) */
+
+	most_submit_mbo(mbo);
+}
+
+/**
+ * poison_channel - poison buffers of a channel
+ * @most_iface: pointer to the interface the channel to be poisoned belongs to
+ * @ch_idx: corresponding channel index
+ *
+ * Destroy a channel and complete all the buffers in both started_list &
+ * pending_list. Return 0 on success, negative on failure.
+ */
+static int poison_channel(struct most_interface *most_iface, int ch_idx)
+{
+	struct dim2_hdm *dev = iface_to_hdm(most_iface);
+	struct hdm_channel *hdm_ch = dev->hch + ch_idx;
+	unsigned long flags;
+	u8 hal_ret;
+	int ret = 0;
+
+	BUG_ON(ch_idx < 0 || ch_idx >= DMA_CHANNELS);
+
+	if (!hdm_ch->is_initialized)
+		return -EPERM;
+
+	tasklet_disable(&dim2_tasklet);
+	spin_lock_irqsave(&dim_lock, flags);
+	hal_ret = dim_destroy_channel(&hdm_ch->ch);
+	hdm_ch->is_initialized = false;
+	if (ch_idx == dev->atx_idx)
+		dev->atx_idx = -1;
+	spin_unlock_irqrestore(&dim_lock, flags);
+	tasklet_enable(&dim2_tasklet);
+	if (hal_ret != DIM_NO_ERROR) {
+		pr_err("HAL Failed to close channel %s\n", hdm_ch->name);
+		ret = -EFAULT;
+	}
+
+	complete_all_mbos(&hdm_ch->started_list);
+	complete_all_mbos(&hdm_ch->pending_list);
+
+	return ret;
+}
+
+/**
+ * dim2_probe - dim2 probe handler
+ * @pdev: platform device structure
+ *
+ * Register the dim2 interface with mostcore and initialize it.
+ * Return 0 on success, negative on failure.
+ */
+static int dim2_probe(struct platform_device *pdev)
+{
+	struct dim2_hdm *dev;
+	struct resource *res;
+	int ret, i;
+	struct kobject *kobj;
+	int irq;
+
+	dev = devm_kzalloc(&pdev->dev, sizeof(*dev), GFP_KERNEL);
+	if (!dev)
+		return -ENOMEM;
+
+	dev->atx_idx = -1;
+
+	platform_set_drvdata(pdev, dev);
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	dev->io_base = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(dev->io_base))
+		return PTR_ERR(dev->io_base);
+
+	irq = platform_get_irq(pdev, 0);
+	if (irq < 0) {
+		dev_err(&pdev->dev, "failed to get ahb0_int irq: %d\n", irq);
+		return irq;
+	}
+
+	ret = devm_request_irq(&pdev->dev, irq, dim2_ahb_isr, 0,
+			       "dim2_ahb0_int", dev);
+	if (ret) {
+		dev_err(&pdev->dev, "failed to request ahb0_int irq %d\n", irq);
+		return ret;
+	}
+
+	irq = platform_get_irq(pdev, 1);
+	if (irq < 0) {
+		dev_err(&pdev->dev, "failed to get mlb_int irq: %d\n", irq);
+		return irq;
+	}
+
+	ret = devm_request_irq(&pdev->dev, irq, dim2_mlb_isr, 0,
+			       "dim2_mlb_int", dev);
+	if (ret) {
+		dev_err(&pdev->dev, "failed to request mlb_int irq %d\n", irq);
+		return ret;
+	}
+
+	init_waitqueue_head(&dev->netinfo_waitq);
+	dev->deliver_netinfo = 0;
+	dev->netinfo_task = kthread_run(&deliver_netinfo_thread, (void *)dev,
+					"dim2_netinfo");
+	if (IS_ERR(dev->netinfo_task))
+		return PTR_ERR(dev->netinfo_task);
+
+	for (i = 0; i < DMA_CHANNELS; i++) {
+		struct most_channel_capability *cap = dev->capabilities + i;
+		struct hdm_channel *hdm_ch = dev->hch + i;
+
+		INIT_LIST_HEAD(&hdm_ch->pending_list);
+		INIT_LIST_HEAD(&hdm_ch->started_list);
+		hdm_ch->is_initialized = false;
+		snprintf(hdm_ch->name, sizeof(hdm_ch->name), "ca%d", i * 2 + 2);
+
+		cap->name_suffix = hdm_ch->name;
+		cap->direction = MOST_CH_RX | MOST_CH_TX;
+		cap->data_type = MOST_CH_CONTROL | MOST_CH_ASYNC |
+				 MOST_CH_ISOC | MOST_CH_SYNC;
+		cap->num_buffers_packet = MAX_BUFFERS_PACKET;
+		cap->buffer_size_packet = MAX_BUF_SIZE_PACKET;
+		cap->num_buffers_streaming = MAX_BUFFERS_STREAMING;
+		cap->buffer_size_streaming = MAX_BUF_SIZE_STREAMING;
+	}
+
+	{
+		const char *fmt;
+
+		if (sizeof(res->start) == sizeof(long long))
+			fmt = "dim2-%016llx";
+		else if (sizeof(res->start) == sizeof(long))
+			fmt = "dim2-%016lx";
+		else
+			fmt = "dim2-%016x";
+
+		snprintf(dev->name, sizeof(dev->name), fmt, res->start);
+	}
+
+	dev->most_iface.interface = ITYPE_MEDIALB_DIM2;
+	dev->most_iface.description = dev->name;
+	dev->most_iface.num_channels = DMA_CHANNELS;
+	dev->most_iface.channel_vector = dev->capabilities;
+	dev->most_iface.configure = configure_channel;
+	dev->most_iface.enqueue = enqueue;
+	dev->most_iface.poison_channel = poison_channel;
+	dev->most_iface.request_netinfo = request_netinfo;
+
+	kobj = most_register_interface(&dev->most_iface);
+	if (IS_ERR(kobj)) {
+		ret = PTR_ERR(kobj);
+		dev_err(&pdev->dev, "failed to register MOST interface\n");
+		goto err_stop_thread;
+	}
+
+	ret = dim2_sysfs_probe(&dev->bus, kobj);
+	if (ret)
+		goto err_unreg_iface;
+
+	ret = startup_dim(pdev);
+	if (ret) {
+		dev_err(&pdev->dev, "failed to initialize DIM2\n");
+		goto err_destroy_bus;
+	}
+
+	return 0;
+
+err_destroy_bus:
+	dim2_sysfs_destroy(&dev->bus);
+err_unreg_iface:
+	most_deregister_interface(&dev->most_iface);
+err_stop_thread:
+	kthread_stop(dev->netinfo_task);
+
+	return ret;
+}
+
+/**
+ * dim2_remove - dim2 remove handler
+ * @pdev: platform device structure
+ *
+ * Unregister the interface from mostcore
+ */
+static int dim2_remove(struct platform_device *pdev)
+{
+	struct dim2_hdm *dev = platform_get_drvdata(pdev);
+	struct dim2_platform_data *pdata = pdev->dev.platform_data;
+	unsigned long flags;
+
+	spin_lock_irqsave(&dim_lock, flags);
+	dim_shutdown();
+	spin_unlock_irqrestore(&dim_lock, flags);
+
+	if (pdata && pdata->destroy)
+		pdata->destroy(pdata);
+
+	dim2_sysfs_destroy(&dev->bus);
+	most_deregister_interface(&dev->most_iface);
+	kthread_stop(dev->netinfo_task);
+
+	/*
+	 * break link to local platform_device_id struct
+	 * to prevent a crash when the platform device module is unloaded
+	 */
+	pdev->id_entry = NULL;
+
+	return 0;
+}
+
+static const struct platform_device_id dim2_id[] = {
+	{ "medialb_dim2" },
+	{ }, /* Terminating entry */
+};
+
+MODULE_DEVICE_TABLE(platform, dim2_id);
+
+static struct platform_driver dim2_driver = {
+	.probe = dim2_probe,
+	.remove = dim2_remove,
+	.id_table = dim2_id,
+	.driver = {
+		.name = "hdm_dim2",
+	},
+};
+
+module_platform_driver(dim2_driver);
+
+MODULE_AUTHOR("Jain Roy Ambi <JainRoy.Ambi@xxxxxxxxxxxxx>");
+MODULE_AUTHOR("Andrey Shvetsov <andrey.shvetsov@xxxxxx>");
+MODULE_DESCRIPTION("MediaLB DIM2 Hardware Dependent Module");
+MODULE_LICENSE("GPL");
diff --git a/drivers/staging/most/dim2/dim2.h b/drivers/staging/most/dim2/dim2.h
new file mode 100644
index 0000000..7fff99c
--- /dev/null
+++ b/drivers/staging/most/dim2/dim2.h
@@ -0,0 +1,27 @@
+/*
+ * dim2.h - MediaLB DIM2 HDM Header
+ *
+ * Copyright (C) 2015, Microchip Technology Germany II GmbH & Co. KG
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * This file is licensed under GPLv2.
+ */
+
+#ifndef DIM2_HDM_H
+#define	DIM2_HDM_H
+
+struct device;
+
+/* platform dependent data for dim2 interface */
+struct dim2_platform_data {
+	int (*init)(struct dim2_platform_data *pd, void __iomem *io_base,
+		    int clk_speed);
+	void (*destroy)(struct dim2_platform_data *pd);
+	void *priv;
+};
+
+#endif	/* DIM2_HDM_H */
diff --git a/drivers/staging/most/dim2/errors.h b/drivers/staging/most/dim2/errors.h
new file mode 100644
index 0000000..0cd8400
--- /dev/null
+++ b/drivers/staging/most/dim2/errors.h
@@ -0,0 +1,57 @@
+/*
+ * errors.h - Definitions of errors for DIM2 HAL API
+ * (MediaLB, Device Interface Macro IP, OS62420)
+ *
+ * Copyright (C) 2015, Microchip Technology Germany II GmbH & Co. KG
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * This file is licensed under GPLv2.
+ */
+
+#ifndef _MOST_DIM_ERRORS_H
+#define _MOST_DIM_ERRORS_H
+
+/**
+ * MOST DIM errors.
+ */
+enum dim_errors_t {
+	/** Not an error */
+	DIM_NO_ERROR = 0,
+
+	/** Bad base address for DIM2 IP */
+	DIM_INIT_ERR_DIM_ADDR = 0x10,
+
+	/** Bad MediaLB clock */
+	DIM_INIT_ERR_MLB_CLOCK,
+
+	/** Bad channel address */
+	DIM_INIT_ERR_CHANNEL_ADDRESS,
+
+	/** Out of DBR memory */
+	DIM_INIT_ERR_OUT_OF_MEMORY,
+
+	/** DIM API is called while DIM is not initialized successfully */
+	DIM_ERR_DRIVER_NOT_INITIALIZED = 0x20,
+
+	/**
+	 * Configuration does not respect hardware limitations
+	 * for isochronous or synchronous channels
+	 */
+	DIM_ERR_BAD_CONFIG,
+
+	/**
+	 * Buffer size does not respect hardware limitations
+	 * for isochronous or synchronous channels
+	 */
+	DIM_ERR_BAD_BUFFER_SIZE,
+
+	DIM_ERR_UNDERFLOW,
+
+	DIM_ERR_OVERFLOW,
+};
+
+#endif /* _MOST_DIM_ERRORS_H */
diff --git a/drivers/staging/most/dim2/hal.c b/drivers/staging/most/dim2/hal.c
new file mode 100644
index 0000000..9f00ecc
--- /dev/null
+++ b/drivers/staging/most/dim2/hal.c
@@ -0,0 +1,985 @@
+/*
+ * hal.c - DIM2 HAL implementation
+ * (MediaLB, Device Interface Macro IP, OS62420)
+ *
+ * Copyright (C) 2015-2016, Microchip Technology Germany II GmbH & Co. KG
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * This file is licensed under GPLv2.
+ */
+
+/* Author: Andrey Shvetsov <andrey.shvetsov@xxxxxx> */
+
+#include "hal.h"
+#include "errors.h"
+#include "reg.h"
+#include <linux/stddef.h>
+#include <linux/kernel.h>
+
+/*
+ * Size factor for isochronous DBR buffer.
+ * Minimal value is 3.
+ */
+#define ISOC_DBR_FACTOR 3u
+
+/*
+ * Number of 32-bit units for DBR map.
+ *
+ * 1: block size is 512, max allocation is 16K
+ * 2: block size is 256, max allocation is 8K
+ * 4: block size is 128, max allocation is 4K
+ * 8: block size is 64, max allocation is 2K
+ *
+ * Min allocated space is block size.
+ * Max possible allocated space is 32 blocks.
+ */
+#define DBR_MAP_SIZE 2
+
+/* -------------------------------------------------------------------------- */
+/* not configurable area */
+
+#define CDT 0x00
+#define ADT 0x40
+#define MLB_CAT 0x80
+#define AHB_CAT 0x88
+
+#define DBR_SIZE  (16 * 1024) /* specified by IP */
+#define DBR_BLOCK_SIZE  (DBR_SIZE / 32 / DBR_MAP_SIZE)
+
+#define ROUND_UP_TO(x, d)  (DIV_ROUND_UP(x, (d)) * (d))
+
+/* -------------------------------------------------------------------------- */
+/* generic helper functions and macros */
+
+static inline u32 bit_mask(u8 position)
+{
+	return (u32)1 << position;
+}
+
+static inline bool dim_on_error(u8 error_id, const char *error_message)
+{
+	dimcb_on_error(error_id, error_message);
+	return false;
+}
+
+/* -------------------------------------------------------------------------- */
+/* types and local variables */
+
+struct async_tx_dbr {
+	u8 ch_addr;
+	u16 rpc;
+	u16 wpc;
+	u16 rest_size;
+	u16 sz_queue[CDT0_RPC_MASK + 1];
+};
+
+struct lld_global_vars_t {
+	bool dim_is_initialized;
+	bool mcm_is_initialized;
+	struct dim2_regs __iomem *dim2; /* DIM2 core base address */
+	struct async_tx_dbr atx_dbr;
+	u32 fcnt;
+	u32 dbr_map[DBR_MAP_SIZE];
+};
+
+static struct lld_global_vars_t g = { false };
+
+/* -------------------------------------------------------------------------- */
+
+static int dbr_get_mask_size(u16 size)
+{
+	int i;
+
+	for (i = 0; i < 6; i++)
+		if (size <= (DBR_BLOCK_SIZE << i))
+			return 1 << i;
+	return 0;
+}
+
+/**
+ * Allocates DBR memory.
+ * @param size Size of the memory to allocate.
+ * @return Offset in DBR memory on success or DBR_SIZE if out of memory.
+ */
+static int alloc_dbr(u16 size)
+{
+	int mask_size;
+	int i, block_idx = 0;
+
+	if (size <= 0)
+		return DBR_SIZE; /* out of memory */
+
+	mask_size = dbr_get_mask_size(size);
+	if (mask_size == 0)
+		return DBR_SIZE; /* out of memory */
+
+	for (i = 0; i < DBR_MAP_SIZE; i++) {
+		u32 const blocks = DIV_ROUND_UP(size, DBR_BLOCK_SIZE);
+		u32 mask = ~((~(u32)0) << blocks);
+
+		do {
+			if ((g.dbr_map[i] & mask) == 0) {
+				g.dbr_map[i] |= mask;
+				return block_idx * DBR_BLOCK_SIZE;
+			}
+			block_idx += mask_size;
+			/* shift left in two steps in case mask_size == 32 */
+			mask <<= mask_size - 1;
+		} while ((mask <<= 1) != 0);
+	}
+
+	return DBR_SIZE; /* out of memory */
+}
+
+static void free_dbr(int offs, int size)
+{
+	int block_idx = offs / DBR_BLOCK_SIZE;
+	u32 const blocks = DIV_ROUND_UP(size, DBR_BLOCK_SIZE);
+	u32 mask = ~((~(u32)0) << blocks);
+
+	mask <<= block_idx % 32;
+	g.dbr_map[block_idx / 32] &= ~mask;
+}
+
+/* -------------------------------------------------------------------------- */
+
+static void dim2_transfer_madr(u32 val)
+{
+	dimcb_io_write(&g.dim2->MADR, val);
+
+	/* wait for transfer completion */
+	while ((dimcb_io_read(&g.dim2->MCTL) & 1) != 1)
+		continue;
+
+	dimcb_io_write(&g.dim2->MCTL, 0);   /* clear transfer complete */
+}
+
+static void dim2_clear_dbr(u16 addr, u16 size)
+{
+	enum { MADR_TB_BIT = 30, MADR_WNR_BIT = 31 };
+
+	u16 const end_addr = addr + size;
+	u32 const cmd = bit_mask(MADR_WNR_BIT) | bit_mask(MADR_TB_BIT);
+
+	dimcb_io_write(&g.dim2->MCTL, 0);   /* clear transfer complete */
+	dimcb_io_write(&g.dim2->MDAT0, 0);
+
+	for (; addr < end_addr; addr++)
+		dim2_transfer_madr(cmd | addr);
+}
+
+static u32 dim2_read_ctr(u32 ctr_addr, u16 mdat_idx)
+{
+	dim2_transfer_madr(ctr_addr);
+
+	return dimcb_io_read((&g.dim2->MDAT0) + mdat_idx);
+}
+
+static void dim2_write_ctr_mask(u32 ctr_addr, const u32 *mask, const u32 *value)
+{
+	enum { MADR_WNR_BIT = 31 };
+
+	dimcb_io_write(&g.dim2->MCTL, 0);   /* clear transfer complete */
+
+	if (mask[0] != 0)
+		dimcb_io_write(&g.dim2->MDAT0, value[0]);
+	if (mask[1] != 0)
+		dimcb_io_write(&g.dim2->MDAT1, value[1]);
+	if (mask[2] != 0)
+		dimcb_io_write(&g.dim2->MDAT2, value[2]);
+	if (mask[3] != 0)
+		dimcb_io_write(&g.dim2->MDAT3, value[3]);
+
+	dimcb_io_write(&g.dim2->MDWE0, mask[0]);
+	dimcb_io_write(&g.dim2->MDWE1, mask[1]);
+	dimcb_io_write(&g.dim2->MDWE2, mask[2]);
+	dimcb_io_write(&g.dim2->MDWE3, mask[3]);
+
+	dim2_transfer_madr(bit_mask(MADR_WNR_BIT) | ctr_addr);
+}
+
+static inline void dim2_write_ctr(u32 ctr_addr, const u32 *value)
+{
+	u32 const mask[4] = { 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF };
+
+	dim2_write_ctr_mask(ctr_addr, mask, value);
+}
+
+static inline void dim2_clear_ctr(u32 ctr_addr)
+{
+	u32 const value[4] = { 0, 0, 0, 0 };
+
+	dim2_write_ctr(ctr_addr, value);
+}
+
+static void dim2_configure_cat(u8 cat_base, u8 ch_addr, u8 ch_type,
+			       bool read_not_write)
+{
+	bool isoc_fce = ch_type == CAT_CT_VAL_ISOC;
+	bool sync_mfe = ch_type == CAT_CT_VAL_SYNC;
+	u16 const cat =
+		(read_not_write << CAT_RNW_BIT) |
+		(ch_type << CAT_CT_SHIFT) |
+		(ch_addr << CAT_CL_SHIFT) |
+		(isoc_fce << CAT_FCE_BIT) |
+		(sync_mfe << CAT_MFE_BIT) |
+		(false << CAT_MT_BIT) |
+		(true << CAT_CE_BIT);
+	u8 const ctr_addr = cat_base + ch_addr / 8;
+	u8 const idx = (ch_addr % 8) / 2;
+	u8 const shift = (ch_addr % 2) * 16;
+	u32 mask[4] = { 0, 0, 0, 0 };
+	u32 value[4] = { 0, 0, 0, 0 };
+
+	mask[idx] = (u32)0xFFFF << shift;
+	value[idx] = cat << shift;
+	dim2_write_ctr_mask(ctr_addr, mask, value);
+}
+
+static void dim2_clear_cat(u8 cat_base, u8 ch_addr)
+{
+	u8 const ctr_addr = cat_base + ch_addr / 8;
+	u8 const idx = (ch_addr % 8) / 2;
+	u8 const shift = (ch_addr % 2) * 16;
+	u32 mask[4] = { 0, 0, 0, 0 };
+	u32 value[4] = { 0, 0, 0, 0 };
+
+	mask[idx] = (u32)0xFFFF << shift;
+	dim2_write_ctr_mask(ctr_addr, mask, value);
+}
+
+static void dim2_configure_cdt(u8 ch_addr, u16 dbr_address, u16 hw_buffer_size,
+			       u16 packet_length)
+{
+	u32 cdt[4] = { 0, 0, 0, 0 };
+
+	if (packet_length)
+		cdt[1] = ((packet_length - 1) << CDT1_BS_ISOC_SHIFT);
+
+	cdt[3] =
+		((hw_buffer_size - 1) << CDT3_BD_SHIFT) |
+		(dbr_address << CDT3_BA_SHIFT);
+	dim2_write_ctr(CDT + ch_addr, cdt);
+}
+
+static u16 dim2_rpc(u8 ch_addr)
+{
+	u32 cdt0 = dim2_read_ctr(CDT + ch_addr, 0);
+
+	return (cdt0 >> CDT0_RPC_SHIFT) & CDT0_RPC_MASK;
+}
+
+static void dim2_clear_cdt(u8 ch_addr)
+{
+	u32 cdt[4] = { 0, 0, 0, 0 };
+
+	dim2_write_ctr(CDT + ch_addr, cdt);
+}
+
+static void dim2_configure_adt(u8 ch_addr)
+{
+	u32 adt[4] = { 0, 0, 0, 0 };
+
+	adt[0] =
+		(true << ADT0_CE_BIT) |
+		(true << ADT0_LE_BIT) |
+		(0 << ADT0_PG_BIT);
+
+	dim2_write_ctr(ADT + ch_addr, adt);
+}
+
+static void dim2_clear_adt(u8 ch_addr)
+{
+	u32 adt[4] = { 0, 0, 0, 0 };
+
+	dim2_write_ctr(ADT + ch_addr, adt);
+}
+
+static void dim2_start_ctrl_async(u8 ch_addr, u8 idx, u32 buf_addr,
+				  u16 buffer_size)
+{
+	u8 const shift = idx * 16;
+
+	u32 mask[4] = { 0, 0, 0, 0 };
+	u32 adt[4] = { 0, 0, 0, 0 };
+
+	mask[1] =
+		bit_mask(ADT1_PS_BIT + shift) |
+		bit_mask(ADT1_RDY_BIT + shift) |
+		(ADT1_CTRL_ASYNC_BD_MASK << (ADT1_BD_SHIFT + shift));
+	adt[1] =
+		(true << (ADT1_PS_BIT + shift)) |
+		(true << (ADT1_RDY_BIT + shift)) |
+		((buffer_size - 1) << (ADT1_BD_SHIFT + shift));
+
+	mask[idx + 2] = 0xFFFFFFFF;
+	adt[idx + 2] = buf_addr;
+
+	dim2_write_ctr_mask(ADT + ch_addr, mask, adt);
+}
+
+static void dim2_start_isoc_sync(u8 ch_addr, u8 idx, u32 buf_addr,
+				 u16 buffer_size)
+{
+	u8 const shift = idx * 16;
+
+	u32 mask[4] = { 0, 0, 0, 0 };
+	u32 adt[4] = { 0, 0, 0, 0 };
+
+	mask[1] =
+		bit_mask(ADT1_RDY_BIT + shift) |
+		(ADT1_ISOC_SYNC_BD_MASK << (ADT1_BD_SHIFT + shift));
+	adt[1] =
+		(true << (ADT1_RDY_BIT + shift)) |
+		((buffer_size - 1) << (ADT1_BD_SHIFT + shift));
+
+	mask[idx + 2] = 0xFFFFFFFF;
+	adt[idx + 2] = buf_addr;
+
+	dim2_write_ctr_mask(ADT + ch_addr, mask, adt);
+}
+
+static void dim2_clear_ctram(void)
+{
+	u32 ctr_addr;
+
+	for (ctr_addr = 0; ctr_addr < 0x90; ctr_addr++)
+		dim2_clear_ctr(ctr_addr);
+}
+
+static void dim2_configure_channel(
+	u8 ch_addr, u8 type, u8 is_tx, u16 dbr_address, u16 hw_buffer_size,
+	u16 packet_length)
+{
+	dim2_configure_cdt(ch_addr, dbr_address, hw_buffer_size, packet_length);
+	dim2_configure_cat(MLB_CAT, ch_addr, type, is_tx ? 1 : 0);
+
+	dim2_configure_adt(ch_addr);
+	dim2_configure_cat(AHB_CAT, ch_addr, type, is_tx ? 0 : 1);
+
+	/* unmask interrupt for used channel, enable mlb_sys_int[0] interrupt */
+	dimcb_io_write(&g.dim2->ACMR0,
+		       dimcb_io_read(&g.dim2->ACMR0) | bit_mask(ch_addr));
+}
+
+static void dim2_clear_channel(u8 ch_addr)
+{
+	/* mask interrupt for used channel, disable mlb_sys_int[0] interrupt */
+	dimcb_io_write(&g.dim2->ACMR0,
+		       dimcb_io_read(&g.dim2->ACMR0) & ~bit_mask(ch_addr));
+
+	dim2_clear_cat(AHB_CAT, ch_addr);
+	dim2_clear_adt(ch_addr);
+
+	dim2_clear_cat(MLB_CAT, ch_addr);
+	dim2_clear_cdt(ch_addr);
+
+	/* clear channel status bit */
+	dimcb_io_write(&g.dim2->ACSR0, bit_mask(ch_addr));
+}
+
+/* -------------------------------------------------------------------------- */
+/* trace async tx dbr fill state */
+
+static inline u16 norm_pc(u16 pc)
+{
+	return pc & CDT0_RPC_MASK;
+}
+
+static void dbrcnt_init(u8 ch_addr, u16 dbr_size)
+{
+	g.atx_dbr.rest_size = dbr_size;
+	g.atx_dbr.rpc = dim2_rpc(ch_addr);
+	g.atx_dbr.wpc = g.atx_dbr.rpc;
+}
+
+static void dbrcnt_enq(int buf_sz)
+{
+	g.atx_dbr.rest_size -= buf_sz;
+	g.atx_dbr.sz_queue[norm_pc(g.atx_dbr.wpc)] = buf_sz;
+	g.atx_dbr.wpc++;
+}
+
+u16 dim_dbr_space(struct dim_channel *ch)
+{
+	u16 cur_rpc;
+	struct async_tx_dbr *dbr = &g.atx_dbr;
+
+	if (ch->addr != dbr->ch_addr)
+		return 0xFFFF;
+
+	cur_rpc = dim2_rpc(ch->addr);
+
+	while (norm_pc(dbr->rpc) != cur_rpc) {
+		dbr->rest_size += dbr->sz_queue[norm_pc(dbr->rpc)];
+		dbr->rpc++;
+	}
+
+	if ((u16)(dbr->wpc - dbr->rpc) >= CDT0_RPC_MASK)
+		return 0;
+
+	return dbr->rest_size;
+}
+
+/* -------------------------------------------------------------------------- */
+/* channel state helpers */
+
+static void state_init(struct int_ch_state *state)
+{
+	state->request_counter = 0;
+	state->service_counter = 0;
+
+	state->idx1 = 0;
+	state->idx2 = 0;
+	state->level = 0;
+}
+
+/* -------------------------------------------------------------------------- */
+/* macro helper functions */
+
+static inline bool check_channel_address(u32 ch_address)
+{
+	return ch_address > 0 && (ch_address % 2) == 0 &&
+	       (ch_address / 2) <= (u32)CAT_CL_MASK;
+}
+
+static inline bool check_packet_length(u32 packet_length)
+{
+	u16 const max_size = ((u16)CDT3_BD_ISOC_MASK + 1u) / ISOC_DBR_FACTOR;
+
+	if (packet_length <= 0)
+		return false; /* too small */
+
+	if (packet_length > max_size)
+		return false; /* too big */
+
+	if (packet_length - 1u > (u32)CDT1_BS_ISOC_MASK)
+		return false; /* too big */
+
+	return true;
+}
+
+static inline bool check_bytes_per_frame(u32 bytes_per_frame)
+{
+	u16 const bd_factor = g.fcnt + 2;
+	u16 const max_size = ((u16)CDT3_BD_MASK + 1u) >> bd_factor;
+
+	if (bytes_per_frame <= 0)
+		return false; /* too small */
+
+	if (bytes_per_frame > max_size)
+		return false; /* too big */
+
+	return true;
+}
+
+static inline u16 norm_ctrl_async_buffer_size(u16 buf_size)
+{
+	u16 const max_size = (u16)ADT1_CTRL_ASYNC_BD_MASK + 1u;
+
+	if (buf_size > max_size)
+		return max_size;
+
+	return buf_size;
+}
+
+static inline u16 norm_isoc_buffer_size(u16 buf_size, u16 packet_length)
+{
+	u16 n;
+	u16 const max_size = (u16)ADT1_ISOC_SYNC_BD_MASK + 1u;
+
+	if (buf_size > max_size)
+		buf_size = max_size;
+
+	n = buf_size / packet_length;
+
+	if (n < 2u)
+		return 0; /* too small buffer for given packet_length */
+
+	return packet_length * n;
+}
+
+static inline u16 norm_sync_buffer_size(u16 buf_size, u16 bytes_per_frame)
+{
+	u16 n;
+	u16 const max_size = (u16)ADT1_ISOC_SYNC_BD_MASK + 1u;
+	u32 const unit = bytes_per_frame << g.fcnt;
+
+	if (buf_size > max_size)
+		buf_size = max_size;
+
+	n = buf_size / unit;
+
+	if (n < 1u)
+		return 0; /* too small buffer for given bytes_per_frame */
+
+	return unit * n;
+}
+
+static void dim2_cleanup(void)
+{
+	/* disable MediaLB */
+	dimcb_io_write(&g.dim2->MLBC0, false << MLBC0_MLBEN_BIT);
+
+	dim2_clear_ctram();
+
+	/* disable mlb_int interrupt */
+	dimcb_io_write(&g.dim2->MIEN, 0);
+
+	/* clear status for all dma channels */
+	dimcb_io_write(&g.dim2->ACSR0, 0xFFFFFFFF);
+	dimcb_io_write(&g.dim2->ACSR1, 0xFFFFFFFF);
+
+	/* mask interrupts for all channels */
+	dimcb_io_write(&g.dim2->ACMR0, 0);
+	dimcb_io_write(&g.dim2->ACMR1, 0);
+}
+
+static void dim2_initialize(bool enable_6pin, u8 mlb_clock)
+{
+	dim2_cleanup();
+
+	/* configure and enable MediaLB */
+	dimcb_io_write(&g.dim2->MLBC0,
+		       enable_6pin << MLBC0_MLBPEN_BIT |
+		       mlb_clock << MLBC0_MLBCLK_SHIFT |
+		       g.fcnt << MLBC0_FCNT_SHIFT |
+		       true << MLBC0_MLBEN_BIT);
+
+	/* activate all HBI channels */
+	dimcb_io_write(&g.dim2->HCMR0, 0xFFFFFFFF);
+	dimcb_io_write(&g.dim2->HCMR1, 0xFFFFFFFF);
+
+	/* enable HBI */
+	dimcb_io_write(&g.dim2->HCTL, bit_mask(HCTL_EN_BIT));
+
+	/* configure DMA */
+	dimcb_io_write(&g.dim2->ACTL,
+		       ACTL_DMA_MODE_VAL_DMA_MODE_1 << ACTL_DMA_MODE_BIT |
+		       true << ACTL_SCE_BIT);
+}
+
+static bool dim2_is_mlb_locked(void)
+{
+	u32 const mask0 = bit_mask(MLBC0_MLBLK_BIT);
+	u32 const mask1 = bit_mask(MLBC1_CLKMERR_BIT) |
+			  bit_mask(MLBC1_LOCKERR_BIT);
+	u32 const c1 = dimcb_io_read(&g.dim2->MLBC1);
+	u32 const nda_mask = (u32)MLBC1_NDA_MASK << MLBC1_NDA_SHIFT;
+
+	dimcb_io_write(&g.dim2->MLBC1, c1 & nda_mask);
+	return (dimcb_io_read(&g.dim2->MLBC1) & mask1) == 0 &&
+	       (dimcb_io_read(&g.dim2->MLBC0) & mask0) != 0;
+}
+
+/* -------------------------------------------------------------------------- */
+/* channel help routines */
+
+static inline bool service_channel(u8 ch_addr, u8 idx)
+{
+	u8 const shift = idx * 16;
+	u32 const adt1 = dim2_read_ctr(ADT + ch_addr, 1);
+	u32 mask[4] = { 0, 0, 0, 0 };
+	u32 adt_w[4] = { 0, 0, 0, 0 };
+
+	if (((adt1 >> (ADT1_DNE_BIT + shift)) & 1) == 0)
+		return false;
+
+	mask[1] =
+		bit_mask(ADT1_DNE_BIT + shift) |
+		bit_mask(ADT1_ERR_BIT + shift) |
+		bit_mask(ADT1_RDY_BIT + shift);
+	dim2_write_ctr_mask(ADT + ch_addr, mask, adt_w);
+
+	/* clear channel status bit */
+	dimcb_io_write(&g.dim2->ACSR0, bit_mask(ch_addr));
+
+	return true;
+}
+
+/* -------------------------------------------------------------------------- */
+/* channel init routines */
+
+static void isoc_init(struct dim_channel *ch, u8 ch_addr, u16 packet_length)
+{
+	state_init(&ch->state);
+
+	ch->addr = ch_addr;
+
+	ch->packet_length = packet_length;
+	ch->bytes_per_frame = 0;
+	ch->done_sw_buffers_number = 0;
+}
+
+static void sync_init(struct dim_channel *ch, u8 ch_addr, u16 bytes_per_frame)
+{
+	state_init(&ch->state);
+
+	ch->addr = ch_addr;
+
+	ch->packet_length = 0;
+	ch->bytes_per_frame = bytes_per_frame;
+	ch->done_sw_buffers_number = 0;
+}
+
+static void channel_init(struct dim_channel *ch, u8 ch_addr)
+{
+	state_init(&ch->state);
+
+	ch->addr = ch_addr;
+
+	ch->packet_length = 0;
+	ch->bytes_per_frame = 0;
+	ch->done_sw_buffers_number = 0;
+}
+
+/* returns true if channel interrupt state is cleared */
+static bool channel_service_interrupt(struct dim_channel *ch)
+{
+	struct int_ch_state *const state = &ch->state;
+
+	if (!service_channel(ch->addr, state->idx2))
+		return false;
+
+	state->idx2 ^= 1;
+	state->request_counter++;
+	return true;
+}
+
+static bool channel_start(struct dim_channel *ch, u32 buf_addr, u16 buf_size)
+{
+	struct int_ch_state *const state = &ch->state;
+
+	if (buf_size <= 0)
+		return dim_on_error(DIM_ERR_BAD_BUFFER_SIZE, "Bad buffer size");
+
+	if (ch->packet_length == 0 && ch->bytes_per_frame == 0 &&
+	    buf_size != norm_ctrl_async_buffer_size(buf_size))
+		return dim_on_error(DIM_ERR_BAD_BUFFER_SIZE,
+				    "Bad control/async buffer size");
+
+	if (ch->packet_length &&
+	    buf_size != norm_isoc_buffer_size(buf_size, ch->packet_length))
+		return dim_on_error(DIM_ERR_BAD_BUFFER_SIZE,
+				    "Bad isochronous buffer size");
+
+	if (ch->bytes_per_frame &&
+	    buf_size != norm_sync_buffer_size(buf_size, ch->bytes_per_frame))
+		return dim_on_error(DIM_ERR_BAD_BUFFER_SIZE,
+				    "Bad synchronous buffer size");
+
+	if (state->level >= 2u)
+		return dim_on_error(DIM_ERR_OVERFLOW, "Channel overflow");
+
+	++state->level;
+
+	if (ch->addr == g.atx_dbr.ch_addr)
+		dbrcnt_enq(buf_size);
+
+	if (ch->packet_length || ch->bytes_per_frame)
+		dim2_start_isoc_sync(ch->addr, state->idx1, buf_addr, buf_size);
+	else
+		dim2_start_ctrl_async(ch->addr, state->idx1, buf_addr,
+				      buf_size);
+	state->idx1 ^= 1;
+
+	return true;
+}
+
+static u8 channel_service(struct dim_channel *ch)
+{
+	struct int_ch_state *const state = &ch->state;
+
+	if (state->service_counter != state->request_counter) {
+		state->service_counter++;
+		if (state->level == 0)
+			return DIM_ERR_UNDERFLOW;
+
+		--state->level;
+		ch->done_sw_buffers_number++;
+	}
+
+	return DIM_NO_ERROR;
+}
+
+static bool channel_detach_buffers(struct dim_channel *ch, u16 buffers_number)
+{
+	if (buffers_number > ch->done_sw_buffers_number)
+		return dim_on_error(DIM_ERR_UNDERFLOW, "Channel underflow");
+
+	ch->done_sw_buffers_number -= buffers_number;
+	return true;
+}
+
+/* -------------------------------------------------------------------------- */
+/* API */
+
+u8 dim_startup(struct dim2_regs __iomem *dim_base_address, u32 mlb_clock,
+	       u32 fcnt)
+{
+	g.dim_is_initialized = false;
+
+	if (!dim_base_address)
+		return DIM_INIT_ERR_DIM_ADDR;
+
+	/* MediaLB clock: 0 - 256 fs, 1 - 512 fs, 2 - 1024 fs, 3 - 2048 fs */
+	/* MediaLB clock: 4 - 3072 fs, 5 - 4096 fs, 6 - 6144 fs, 7 - 8192 fs */
+	if (mlb_clock >= 8)
+		return DIM_INIT_ERR_MLB_CLOCK;
+
+	if (fcnt > MLBC0_FCNT_MAX_VAL)
+		return DIM_INIT_ERR_MLB_CLOCK;
+
+	g.dim2 = dim_base_address;
+	g.fcnt = fcnt;
+	g.dbr_map[0] = 0;
+	g.dbr_map[1] = 0;
+
+	dim2_initialize(mlb_clock >= 3, mlb_clock);
+
+	g.dim_is_initialized = true;
+
+	return DIM_NO_ERROR;
+}
+
+void dim_shutdown(void)
+{
+	g.dim_is_initialized = false;
+	dim2_cleanup();
+}
+
+bool dim_get_lock_state(void)
+{
+	return dim2_is_mlb_locked();
+}
+
+static u8 init_ctrl_async(struct dim_channel *ch, u8 type, u8 is_tx,
+			  u16 ch_address, u16 hw_buffer_size)
+{
+	if (!g.dim_is_initialized || !ch)
+		return DIM_ERR_DRIVER_NOT_INITIALIZED;
+
+	if (!check_channel_address(ch_address))
+		return DIM_INIT_ERR_CHANNEL_ADDRESS;
+
+	ch->dbr_size = ROUND_UP_TO(hw_buffer_size, DBR_BLOCK_SIZE);
+	ch->dbr_addr = alloc_dbr(ch->dbr_size);
+	if (ch->dbr_addr >= DBR_SIZE)
+		return DIM_INIT_ERR_OUT_OF_MEMORY;
+
+	channel_init(ch, ch_address / 2);
+
+	dim2_configure_channel(ch->addr, type, is_tx,
+			       ch->dbr_addr, ch->dbr_size, 0);
+
+	return DIM_NO_ERROR;
+}
+
+void dim_service_mlb_int_irq(void)
+{
+	dimcb_io_write(&g.dim2->MS0, 0);
+	dimcb_io_write(&g.dim2->MS1, 0);
+}
+
+u16 dim_norm_ctrl_async_buffer_size(u16 buf_size)
+{
+	return norm_ctrl_async_buffer_size(buf_size);
+}
+
+/**
+ * Retrieves the maximal possible correct buffer size for the isochronous data
+ * type, conforming to the given packet length and not bigger than the given
+ * buffer size.
+ * Returns the non-zero correct buffer size, or zero on error.
+ */
+u16 dim_norm_isoc_buffer_size(u16 buf_size, u16 packet_length)
+{
+	if (!check_packet_length(packet_length))
+		return 0;
+
+	return norm_isoc_buffer_size(buf_size, packet_length);
+}
+
+/**
+ * Retrieves the maximal possible correct buffer size for the synchronous data
+ * type, conforming to the given bytes per frame and not bigger than the given
+ * buffer size.
+ * Returns the non-zero correct buffer size, or zero on error.
+ */
+u16 dim_norm_sync_buffer_size(u16 buf_size, u16 bytes_per_frame)
+{
+	if (!check_bytes_per_frame(bytes_per_frame))
+		return 0;
+
+	return norm_sync_buffer_size(buf_size, bytes_per_frame);
+}
+
+u8 dim_init_control(struct dim_channel *ch, u8 is_tx, u16 ch_address,
+		    u16 max_buffer_size)
+{
+	return init_ctrl_async(ch, CAT_CT_VAL_CONTROL, is_tx, ch_address,
+			       max_buffer_size);
+}
+
+u8 dim_init_async(struct dim_channel *ch, u8 is_tx, u16 ch_address,
+		  u16 max_buffer_size)
+{
+	u8 ret = init_ctrl_async(ch, CAT_CT_VAL_ASYNC, is_tx, ch_address,
+				 max_buffer_size);
+
+	if (is_tx && !g.atx_dbr.ch_addr) {
+		g.atx_dbr.ch_addr = ch->addr;
+		dbrcnt_init(ch->addr, ch->dbr_size);
+		dimcb_io_write(&g.dim2->MIEN, bit_mask(20));
+	}
+
+	return ret;
+}
+
+u8 dim_init_isoc(struct dim_channel *ch, u8 is_tx, u16 ch_address,
+		 u16 packet_length)
+{
+	if (!g.dim_is_initialized || !ch)
+		return DIM_ERR_DRIVER_NOT_INITIALIZED;
+
+	if (!check_channel_address(ch_address))
+		return DIM_INIT_ERR_CHANNEL_ADDRESS;
+
+	if (!check_packet_length(packet_length))
+		return DIM_ERR_BAD_CONFIG;
+
+	ch->dbr_size = packet_length * ISOC_DBR_FACTOR;
+	ch->dbr_addr = alloc_dbr(ch->dbr_size);
+	if (ch->dbr_addr >= DBR_SIZE)
+		return DIM_INIT_ERR_OUT_OF_MEMORY;
+
+	isoc_init(ch, ch_address / 2, packet_length);
+
+	dim2_configure_channel(ch->addr, CAT_CT_VAL_ISOC, is_tx, ch->dbr_addr,
+			       ch->dbr_size, packet_length);
+
+	return DIM_NO_ERROR;
+}
+
+u8 dim_init_sync(struct dim_channel *ch, u8 is_tx, u16 ch_address,
+		 u16 bytes_per_frame)
+{
+	u16 bd_factor = g.fcnt + 2;
+
+	if (!g.dim_is_initialized || !ch)
+		return DIM_ERR_DRIVER_NOT_INITIALIZED;
+
+	if (!check_channel_address(ch_address))
+		return DIM_INIT_ERR_CHANNEL_ADDRESS;
+
+	if (!check_bytes_per_frame(bytes_per_frame))
+		return DIM_ERR_BAD_CONFIG;
+
+	ch->dbr_size = bytes_per_frame << bd_factor;
+	ch->dbr_addr = alloc_dbr(ch->dbr_size);
+	if (ch->dbr_addr >= DBR_SIZE)
+		return DIM_INIT_ERR_OUT_OF_MEMORY;
+
+	sync_init(ch, ch_address / 2, bytes_per_frame);
+
+	dim2_clear_dbr(ch->dbr_addr, ch->dbr_size);
+	dim2_configure_channel(ch->addr, CAT_CT_VAL_SYNC, is_tx,
+			       ch->dbr_addr, ch->dbr_size, 0);
+
+	return DIM_NO_ERROR;
+}
+
+u8 dim_destroy_channel(struct dim_channel *ch)
+{
+	if (!g.dim_is_initialized || !ch)
+		return DIM_ERR_DRIVER_NOT_INITIALIZED;
+
+	if (ch->addr == g.atx_dbr.ch_addr) {
+		dimcb_io_write(&g.dim2->MIEN, 0);
+		g.atx_dbr.ch_addr = 0;
+	}
+
+	dim2_clear_channel(ch->addr);
+	if (ch->dbr_addr < DBR_SIZE)
+		free_dbr(ch->dbr_addr, ch->dbr_size);
+	ch->dbr_addr = DBR_SIZE;
+
+	return DIM_NO_ERROR;
+}
+
+void dim_service_ahb_int_irq(struct dim_channel *const *channels)
+{
+	bool state_changed;
+
+	if (!g.dim_is_initialized) {
+		dim_on_error(DIM_ERR_DRIVER_NOT_INITIALIZED,
+			     "DIM is not initialized");
+		return;
+	}
+
+	if (!channels) {
+		dim_on_error(DIM_ERR_DRIVER_NOT_INITIALIZED, "Bad channels");
+		return;
+	}
+
+	/*
+	 * Use a while loop and a flag to make sure the age is changed back at
+	 * least once, otherwise the interrupt may never come if the CPU
+	 * generates interrupts only on changing age.
+	 * This loop runs at most as many times as there are channels, because
+	 * channel_service_interrupt() doesn't start the channel again.
+	 */
+	do {
+		struct dim_channel *const *ch = channels;
+
+		state_changed = false;
+
+		while (*ch) {
+			state_changed |= channel_service_interrupt(*ch);
+			++ch;
+		}
+	} while (state_changed);
+}
+
+u8 dim_service_channel(struct dim_channel *ch)
+{
+	if (!g.dim_is_initialized || !ch)
+		return DIM_ERR_DRIVER_NOT_INITIALIZED;
+
+	return channel_service(ch);
+}
+
+struct dim_ch_state_t *dim_get_channel_state(struct dim_channel *ch,
+					     struct dim_ch_state_t *state_ptr)
+{
+	if (!ch || !state_ptr)
+		return NULL;
+
+	state_ptr->ready = ch->state.level < 2;
+	state_ptr->done_buffers = ch->done_sw_buffers_number;
+
+	return state_ptr;
+}
+
+bool dim_enqueue_buffer(struct dim_channel *ch, u32 buffer_addr,
+			u16 buffer_size)
+{
+	if (!ch)
+		return dim_on_error(DIM_ERR_DRIVER_NOT_INITIALIZED,
+				    "Bad channel");
+
+	return channel_start(ch, buffer_addr, buffer_size);
+}
+
+bool dim_detach_buffers(struct dim_channel *ch, u16 buffers_number)
+{
+	if (!ch)
+		return dim_on_error(DIM_ERR_DRIVER_NOT_INITIALIZED,
+				    "Bad channel");
+
+	return channel_detach_buffers(ch, buffers_number);
+}
diff --git a/drivers/staging/most/dim2/hal.h b/drivers/staging/most/dim2/hal.h
new file mode 100644
index 0000000..a08e4a4
--- /dev/null
+++ b/drivers/staging/most/dim2/hal.h
@@ -0,0 +1,112 @@
+/*
+ * hal.h - DIM2 HAL interface
+ * (MediaLB, Device Interface Macro IP, OS62420)
+ *
+ * Copyright (C) 2015, Microchip Technology Germany II GmbH & Co. KG
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * This file is licensed under GPLv2.
+ */
+
+#ifndef _DIM2_HAL_H
+#define _DIM2_HAL_H
+
+#include <linux/types.h>
+#include "reg.h"
+
+/*
+ * The values below are specified in the hardware specification.
+ * So, they should not be changed until the hardware specification changes.
+ */
+enum mlb_clk_speed {
+	CLK_256FS = 0,
+	CLK_512FS = 1,
+	CLK_1024FS = 2,
+	CLK_2048FS = 3,
+	CLK_3072FS = 4,
+	CLK_4096FS = 5,
+	CLK_6144FS = 6,
+	CLK_8192FS = 7,
+};
+
+struct dim_ch_state_t {
+	bool ready; /* Shows readiness to enqueue next buffer */
+	u16 done_buffers; /* Number of completed buffers */
+};
+
+struct int_ch_state {
+	/* changed only in interrupt context */
+	volatile int request_counter;
+
+	/* changed only in task context */
+	volatile int service_counter;
+
+	u8 idx1;
+	u8 idx2;
+	u8 level; /* [0..2], buffering level */
+};
+
+struct dim_channel {
+	struct int_ch_state state;
+	u8 addr;
+	u16 dbr_addr;
+	u16 dbr_size;
+	u16 packet_length; /* isochronous packet length in bytes */
+	u16 bytes_per_frame; /* synchronous bytes per frame */
+	u16 done_sw_buffers_number; /* number of completed software buffers */
+};
+
+u8 dim_startup(struct dim2_regs __iomem *dim_base_address, u32 mlb_clock,
+	       u32 fcnt);
+
+void dim_shutdown(void);
+
+bool dim_get_lock_state(void);
+
+u16 dim_norm_ctrl_async_buffer_size(u16 buf_size);
+
+u16 dim_norm_isoc_buffer_size(u16 buf_size, u16 packet_length);
+
+u16 dim_norm_sync_buffer_size(u16 buf_size, u16 bytes_per_frame);
+
+u8 dim_init_control(struct dim_channel *ch, u8 is_tx, u16 ch_address,
+		    u16 max_buffer_size);
+
+u8 dim_init_async(struct dim_channel *ch, u8 is_tx, u16 ch_address,
+		  u16 max_buffer_size);
+
+u8 dim_init_isoc(struct dim_channel *ch, u8 is_tx, u16 ch_address,
+		 u16 packet_length);
+
+u8 dim_init_sync(struct dim_channel *ch, u8 is_tx, u16 ch_address,
+		 u16 bytes_per_frame);
+
+u8 dim_destroy_channel(struct dim_channel *ch);
+
+void dim_service_mlb_int_irq(void);
+
+void dim_service_ahb_int_irq(struct dim_channel *const *channels);
+
+u8 dim_service_channel(struct dim_channel *ch);
+
+struct dim_ch_state_t *dim_get_channel_state(struct dim_channel *ch,
+					     struct dim_ch_state_t *state_ptr);
+
+u16 dim_dbr_space(struct dim_channel *ch);
+
+bool dim_enqueue_buffer(struct dim_channel *ch, u32 buffer_addr,
+			u16 buffer_size);
+
+bool dim_detach_buffers(struct dim_channel *ch, u16 buffers_number);
+
+u32 dimcb_io_read(u32 __iomem *ptr32);
+
+void dimcb_io_write(u32 __iomem *ptr32, u32 value);
+
+void dimcb_on_error(u8 error_id, const char *error_message);
+
+#endif /* _DIM2_HAL_H */
diff --git a/drivers/staging/most/dim2/reg.h b/drivers/staging/most/dim2/reg.h
new file mode 100644
index 0000000..b0d095c
--- /dev/null
+++ b/drivers/staging/most/dim2/reg.h
@@ -0,0 +1,163 @@
+/*
+ * reg.h - Definitions for registers of DIM2
+ * (MediaLB, Device Interface Macro IP, OS62420)
+ *
+ * Copyright (C) 2015, Microchip Technology Germany II GmbH & Co. KG
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * This file is licensed under GPLv2.
+ */
+
+#ifndef DIM2_OS62420_H
+#define	DIM2_OS62420_H
+
+#include <linux/types.h>
+
+struct dim2_regs {
+	/* 0x00 */ u32 MLBC0;
+	/* 0x01 */ u32 rsvd0[1];
+	/* 0x02 */ u32 MLBPC0;
+	/* 0x03 */ u32 MS0;
+	/* 0x04 */ u32 rsvd1[1];
+	/* 0x05 */ u32 MS1;
+	/* 0x06 */ u32 rsvd2[2];
+	/* 0x08 */ u32 MSS;
+	/* 0x09 */ u32 MSD;
+	/* 0x0A */ u32 rsvd3[1];
+	/* 0x0B */ u32 MIEN;
+	/* 0x0C */ u32 rsvd4[1];
+	/* 0x0D */ u32 MLBPC2;
+	/* 0x0E */ u32 MLBPC1;
+	/* 0x0F */ u32 MLBC1;
+	/* 0x10 */ u32 rsvd5[0x10];
+	/* 0x20 */ u32 HCTL;
+	/* 0x21 */ u32 rsvd6[1];
+	/* 0x22 */ u32 HCMR0;
+	/* 0x23 */ u32 HCMR1;
+	/* 0x24 */ u32 HCER0;
+	/* 0x25 */ u32 HCER1;
+	/* 0x26 */ u32 HCBR0;
+	/* 0x27 */ u32 HCBR1;
+	/* 0x28 */ u32 rsvd7[8];
+	/* 0x30 */ u32 MDAT0;
+	/* 0x31 */ u32 MDAT1;
+	/* 0x32 */ u32 MDAT2;
+	/* 0x33 */ u32 MDAT3;
+	/* 0x34 */ u32 MDWE0;
+	/* 0x35 */ u32 MDWE1;
+	/* 0x36 */ u32 MDWE2;
+	/* 0x37 */ u32 MDWE3;
+	/* 0x38 */ u32 MCTL;
+	/* 0x39 */ u32 MADR;
+	/* 0x3A */ u32 rsvd8[0xB6];
+	/* 0xF0 */ u32 ACTL;
+	/* 0xF1 */ u32 rsvd9[3];
+	/* 0xF4 */ u32 ACSR0;
+	/* 0xF5 */ u32 ACSR1;
+	/* 0xF6 */ u32 ACMR0;
+	/* 0xF7 */ u32 ACMR1;
+};
+
+#define DIM2_MASK(n)  (~((~(u32)0) << (n)))
+
+enum {
+	MLBC0_MLBLK_BIT = 7,
+
+	MLBC0_MLBPEN_BIT = 5,
+
+	MLBC0_MLBCLK_SHIFT = 2,
+	MLBC0_MLBCLK_VAL_256FS = 0,
+	MLBC0_MLBCLK_VAL_512FS = 1,
+	MLBC0_MLBCLK_VAL_1024FS = 2,
+	MLBC0_MLBCLK_VAL_2048FS = 3,
+
+	MLBC0_FCNT_SHIFT = 15,
+	MLBC0_FCNT_MASK = 7,
+	MLBC0_FCNT_MAX_VAL = 6,
+
+	MLBC0_MLBEN_BIT = 0,
+
+	MIEN_CTX_BREAK_BIT = 29,
+	MIEN_CTX_PE_BIT = 28,
+	MIEN_CTX_DONE_BIT = 27,
+
+	MIEN_CRX_BREAK_BIT = 26,
+	MIEN_CRX_PE_BIT = 25,
+	MIEN_CRX_DONE_BIT = 24,
+
+	MIEN_ATX_BREAK_BIT = 22,
+	MIEN_ATX_PE_BIT = 21,
+	MIEN_ATX_DONE_BIT = 20,
+
+	MIEN_ARX_BREAK_BIT = 19,
+	MIEN_ARX_PE_BIT = 18,
+	MIEN_ARX_DONE_BIT = 17,
+
+	MIEN_SYNC_PE_BIT = 16,
+
+	MIEN_ISOC_BUFO_BIT = 1,
+	MIEN_ISOC_PE_BIT = 0,
+
+	MLBC1_NDA_SHIFT = 8,
+	MLBC1_NDA_MASK = 0xFF,
+
+	MLBC1_CLKMERR_BIT = 7,
+	MLBC1_LOCKERR_BIT = 6,
+
+	ACTL_DMA_MODE_BIT = 2,
+	ACTL_DMA_MODE_VAL_DMA_MODE_0 = 0,
+	ACTL_DMA_MODE_VAL_DMA_MODE_1 = 1,
+	ACTL_SCE_BIT = 0,
+
+	HCTL_EN_BIT = 15
+};
+
+enum {
+	CDT0_RPC_SHIFT = 16 + 11,
+	CDT0_RPC_MASK = DIM2_MASK(5),
+
+	CDT1_BS_ISOC_SHIFT = 0,
+	CDT1_BS_ISOC_MASK = DIM2_MASK(9),
+
+	CDT3_BD_SHIFT = 0,
+	CDT3_BD_MASK = DIM2_MASK(12),
+	CDT3_BD_ISOC_MASK = DIM2_MASK(13),
+	CDT3_BA_SHIFT = 16,
+
+	ADT0_CE_BIT = 15,
+	ADT0_LE_BIT = 14,
+	ADT0_PG_BIT = 13,
+
+	ADT1_RDY_BIT = 15,
+	ADT1_DNE_BIT = 14,
+	ADT1_ERR_BIT = 13,
+	ADT1_PS_BIT = 12,
+	ADT1_MEP_BIT = 11,
+	ADT1_BD_SHIFT = 0,
+	ADT1_CTRL_ASYNC_BD_MASK = DIM2_MASK(11),
+	ADT1_ISOC_SYNC_BD_MASK = DIM2_MASK(13),
+
+	CAT_FCE_BIT = 14,
+	CAT_MFE_BIT = 14,
+
+	CAT_MT_BIT = 13,
+
+	CAT_RNW_BIT = 12,
+
+	CAT_CE_BIT = 11,
+
+	CAT_CT_SHIFT = 8,
+	CAT_CT_VAL_SYNC = 0,
+	CAT_CT_VAL_CONTROL = 1,
+	CAT_CT_VAL_ASYNC = 2,
+	CAT_CT_VAL_ISOC = 3,
+
+	CAT_CL_SHIFT = 0,
+	CAT_CL_MASK = DIM2_MASK(6)
+};
+
+#endif	/* DIM2_OS62420_H */
diff --git a/drivers/staging/most/dim2/sysfs.c b/drivers/staging/most/dim2/sysfs.c
new file mode 100644
index 0000000..1331cf0
--- /dev/null
+++ b/drivers/staging/most/dim2/sysfs.c
@@ -0,0 +1,115 @@
+/*
+ * sysfs.c - MediaLB sysfs information
+ *
+ * Copyright (C) 2015, Microchip Technology Germany II GmbH & Co. KG
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * This file is licensed under GPLv2.
+ */
+
+/* Author: Andrey Shvetsov <andrey.shvetsov@xxxxxx> */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/kernel.h>
+#include "sysfs.h"
+
+struct bus_attr {
+	struct attribute attr;
+	ssize_t (*show)(struct medialb_bus *bus, char *buf);
+	ssize_t (*store)(struct medialb_bus *bus, const char *buf,
+			 size_t count);
+};
+
+static ssize_t state_show(struct medialb_bus *bus, char *buf)
+{
+	bool state = dim2_sysfs_get_state_cb();
+
+	return sprintf(buf, "%s\n", state ? "locked" : "");
+}
+
+static struct bus_attr state_attr = __ATTR_RO(state);
+
+static struct attribute *bus_default_attrs[] = {
+	&state_attr.attr,
+	NULL,
+};
+
+static const struct attribute_group bus_attr_group = {
+	.attrs = bus_default_attrs,
+};
+
+static void bus_kobj_release(struct kobject *kobj)
+{
+}
+
+static ssize_t bus_kobj_attr_show(struct kobject *kobj, struct attribute *attr,
+				  char *buf)
+{
+	struct medialb_bus *bus =
+		container_of(kobj, struct medialb_bus, kobj_group);
+	struct bus_attr *xattr = container_of(attr, struct bus_attr, attr);
+
+	if (!xattr->show)
+		return -EIO;
+
+	return xattr->show(bus, buf);
+}
+
+static ssize_t bus_kobj_attr_store(struct kobject *kobj, struct attribute *attr,
+				   const char *buf, size_t count)
+{
+	struct medialb_bus *bus =
+		container_of(kobj, struct medialb_bus, kobj_group);
+	struct bus_attr *xattr = container_of(attr, struct bus_attr, attr);
+
+	if (!xattr->store)
+		return -EIO;
+
+	return xattr->store(bus, buf, count);
+}
+
+static struct sysfs_ops const bus_kobj_sysfs_ops = {
+	.show = bus_kobj_attr_show,
+	.store = bus_kobj_attr_store,
+};
+
+static struct kobj_type bus_ktype = {
+	.release = bus_kobj_release,
+	.sysfs_ops = &bus_kobj_sysfs_ops,
+};
+
+int dim2_sysfs_probe(struct medialb_bus *bus, struct kobject *parent_kobj)
+{
+	int err;
+
+	kobject_init(&bus->kobj_group, &bus_ktype);
+	err = kobject_add(&bus->kobj_group, parent_kobj, "bus");
+	if (err) {
+		pr_err("kobject_add() failed: %d\n", err);
+		goto err_kobject_add;
+	}
+
+	err = sysfs_create_group(&bus->kobj_group, &bus_attr_group);
+	if (err) {
+		pr_err("sysfs_create_group() failed: %d\n", err);
+		goto err_create_group;
+	}
+
+	return 0;
+
+err_create_group:
+	kobject_put(&bus->kobj_group);
+
+err_kobject_add:
+	return err;
+}
+
+void dim2_sysfs_destroy(struct medialb_bus *bus)
+{
+	kobject_put(&bus->kobj_group);
+}
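
The state attribute added above follows the usual raw-kobject pattern: show/store requests are dispatched through container_of() to the bus_attr callbacks, and the node reports the MediaLB lock state as the word "locked" or an empty string. As a side note, a minimal user-space check of that node could look like the sketch below; the sysfs path is only an assumed example, since the real location depends on the name under which the MOST interface is registered.

#include <stdio.h>

int main(void)
{
	/* Assumed example path; the actual path depends on the registered interface. */
	FILE *f = fopen("/sys/devices/platform/medialb_dim2/most/mdev0/bus/state", "r");
	char buf[16] = "";

	if (!f) {
		perror("state attribute");
		return 1;
	}
	if (fgets(buf, sizeof(buf), f))
		printf("MediaLB is %s\n", buf[0] == 'l' ? "locked" : "unlocked");
	fclose(f);
	return 0;
}
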
diff --git a/drivers/staging/most/dim2/sysfs.h b/drivers/staging/most/dim2/sysfs.h
new file mode 100644
index 0000000..236de47
--- /dev/null
+++ b/drivers/staging/most/dim2/sysfs.h
@@ -0,0 +1,36 @@
+/*
+ * sysfs.h - MediaLB sysfs information
+ *
+ * Copyright (C) 2015, Microchip Technology Germany II GmbH & Co. KG
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * This file is licensed under GPLv2.
+ */
+
+/* Author: Andrey Shvetsov <andrey.shvetsov@xxxxxx> */
+
+#ifndef DIM2_SYSFS_H
+#define	DIM2_SYSFS_H
+
+#include <linux/kobject.h>
+
+struct medialb_bus {
+	struct kobject kobj_group;
+};
+
+struct dim2_hdm;
+
+int dim2_sysfs_probe(struct medialb_bus *bus, struct kobject *parent_kobj);
+void dim2_sysfs_destroy(struct medialb_bus *bus);
+
+/*
+ * callback,
+ * must deliver MediaLB state as true if locked or false if unlocked
+ */
+bool dim2_sysfs_get_state_cb(void);
+
+#endif	/* DIM2_SYSFS_H */
diff --git a/drivers/staging/most/hdm-dim2/Kconfig b/drivers/staging/most/hdm-dim2/Kconfig
deleted file mode 100644
index 663bfeb..0000000
--- a/drivers/staging/most/hdm-dim2/Kconfig
+++ /dev/null
@@ -1,16 +0,0 @@
-#
-# MediaLB configuration
-#
-
-config HDM_DIM2
-	tristate "DIM2 HDM"
-	depends on HAS_IOMEM
-
-	---help---
-	  Say Y here if you want to connect via MediaLB to network transceiver.
-	  This device driver is platform dependent and needs an additional
-	  platform driver to be installed. For more information contact
-	  maintainer of this driver.
-
-	  To compile this driver as a module, choose M here: the
-	  module will be called hdm_dim2.
diff --git a/drivers/staging/most/hdm-dim2/Makefile b/drivers/staging/most/hdm-dim2/Makefile
deleted file mode 100644
index 6bbee87..0000000
--- a/drivers/staging/most/hdm-dim2/Makefile
+++ /dev/null
@@ -1,5 +0,0 @@
-obj-$(CONFIG_HDM_DIM2) += hdm_dim2.o
-
-hdm_dim2-objs := dim2_hdm.o dim2_hal.o dim2_sysfs.o
-ccflags-y += -Idrivers/staging/most/mostcore/
-ccflags-y += -Idrivers/staging/most/aim-network/
diff --git a/drivers/staging/most/hdm-dim2/dim2_errors.h b/drivers/staging/most/hdm-dim2/dim2_errors.h
deleted file mode 100644
index 66343ba..0000000
--- a/drivers/staging/most/hdm-dim2/dim2_errors.h
+++ /dev/null
@@ -1,57 +0,0 @@
-/*
- * dim2_errors.h - Definitions of errors for DIM2 HAL API
- * (MediaLB, Device Interface Macro IP, OS62420)
- *
- * Copyright (C) 2015, Microchip Technology Germany II GmbH & Co. KG
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * This file is licensed under GPLv2.
- */
-
-#ifndef _MOST_DIM_ERRORS_H
-#define _MOST_DIM_ERRORS_H
-
-/**
- * MOST DIM errors.
- */
-enum dim_errors_t {
-	/** Not an error */
-	DIM_NO_ERROR = 0,
-
-	/** Bad base address for DIM2 IP */
-	DIM_INIT_ERR_DIM_ADDR = 0x10,
-
-	/**< Bad MediaLB clock */
-	DIM_INIT_ERR_MLB_CLOCK,
-
-	/** Bad channel address */
-	DIM_INIT_ERR_CHANNEL_ADDRESS,
-
-	/** Out of DBR memory */
-	DIM_INIT_ERR_OUT_OF_MEMORY,
-
-	/** DIM API is called while DIM is not initialized successfully */
-	DIM_ERR_DRIVER_NOT_INITIALIZED = 0x20,
-
-	/**
-	 * Configuration does not respect hardware limitations
-	 * for isochronous or synchronous channels
-	 */
-	DIM_ERR_BAD_CONFIG,
-
-	/**
-	 * Buffer size does not respect hardware limitations
-	 * for isochronous or synchronous channels
-	 */
-	DIM_ERR_BAD_BUFFER_SIZE,
-
-	DIM_ERR_UNDERFLOW,
-
-	DIM_ERR_OVERFLOW,
-};
-
-#endif /* _MOST_DIM_ERRORS_H */
diff --git a/drivers/staging/most/hdm-dim2/dim2_hal.c b/drivers/staging/most/hdm-dim2/dim2_hal.c
deleted file mode 100644
index 9148464..0000000
--- a/drivers/staging/most/hdm-dim2/dim2_hal.c
+++ /dev/null
@@ -1,985 +0,0 @@
-/*
- * dim2_hal.c - DIM2 HAL implementation
- * (MediaLB, Device Interface Macro IP, OS62420)
- *
- * Copyright (C) 2015-2016, Microchip Technology Germany II GmbH & Co. KG
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * This file is licensed under GPLv2.
- */
-
-/* Author: Andrey Shvetsov <andrey.shvetsov@xxxxxx> */
-
-#include "dim2_hal.h"
-#include "dim2_errors.h"
-#include "dim2_reg.h"
-#include <linux/stddef.h>
-#include <linux/kernel.h>
-
-/*
- * Size factor for isochronous DBR buffer.
- * Minimal value is 3.
- */
-#define ISOC_DBR_FACTOR 3u
-
-/*
- * Number of 32-bit units for DBR map.
- *
- * 1: block size is 512, max allocation is 16K
- * 2: block size is 256, max allocation is 8K
- * 4: block size is 128, max allocation is 4K
- * 8: block size is 64, max allocation is 2K
- *
- * Min allocated space is block size.
- * Max possible allocated space is 32 blocks.
- */
-#define DBR_MAP_SIZE 2
-
-/* -------------------------------------------------------------------------- */
-/* not configurable area */
-
-#define CDT 0x00
-#define ADT 0x40
-#define MLB_CAT 0x80
-#define AHB_CAT 0x88
-
-#define DBR_SIZE  (16 * 1024) /* specified by IP */
-#define DBR_BLOCK_SIZE  (DBR_SIZE / 32 / DBR_MAP_SIZE)
-
-#define ROUND_UP_TO(x, d)  (DIV_ROUND_UP(x, (d)) * (d))
-
-/* -------------------------------------------------------------------------- */
-/* generic helper functions and macros */
-
-static inline u32 bit_mask(u8 position)
-{
-	return (u32)1 << position;
-}
-
-static inline bool dim_on_error(u8 error_id, const char *error_message)
-{
-	dimcb_on_error(error_id, error_message);
-	return false;
-}
-
-/* -------------------------------------------------------------------------- */
-/* types and local variables */
-
-struct async_tx_dbr {
-	u8 ch_addr;
-	u16 rpc;
-	u16 wpc;
-	u16 rest_size;
-	u16 sz_queue[CDT0_RPC_MASK + 1];
-};
-
-struct lld_global_vars_t {
-	bool dim_is_initialized;
-	bool mcm_is_initialized;
-	struct dim2_regs __iomem *dim2; /* DIM2 core base address */
-	struct async_tx_dbr atx_dbr;
-	u32 fcnt;
-	u32 dbr_map[DBR_MAP_SIZE];
-};
-
-static struct lld_global_vars_t g = { false };
-
-/* -------------------------------------------------------------------------- */
-
-static int dbr_get_mask_size(u16 size)
-{
-	int i;
-
-	for (i = 0; i < 6; i++)
-		if (size <= (DBR_BLOCK_SIZE << i))
-			return 1 << i;
-	return 0;
-}
-
-/**
- * Allocates DBR memory.
- * @param size Allocating memory size.
- * @return Offset in DBR memory by success or DBR_SIZE if out of memory.
- */
-static int alloc_dbr(u16 size)
-{
-	int mask_size;
-	int i, block_idx = 0;
-
-	if (size <= 0)
-		return DBR_SIZE; /* out of memory */
-
-	mask_size = dbr_get_mask_size(size);
-	if (mask_size == 0)
-		return DBR_SIZE; /* out of memory */
-
-	for (i = 0; i < DBR_MAP_SIZE; i++) {
-		u32 const blocks = DIV_ROUND_UP(size, DBR_BLOCK_SIZE);
-		u32 mask = ~((~(u32)0) << blocks);
-
-		do {
-			if ((g.dbr_map[i] & mask) == 0) {
-				g.dbr_map[i] |= mask;
-				return block_idx * DBR_BLOCK_SIZE;
-			}
-			block_idx += mask_size;
-			/* do shift left with 2 steps in case mask_size == 32 */
-			mask <<= mask_size - 1;
-		} while ((mask <<= 1) != 0);
-	}
-
-	return DBR_SIZE; /* out of memory */
-}
-
-static void free_dbr(int offs, int size)
-{
-	int block_idx = offs / DBR_BLOCK_SIZE;
-	u32 const blocks = DIV_ROUND_UP(size, DBR_BLOCK_SIZE);
-	u32 mask = ~((~(u32)0) << blocks);
-
-	mask <<= block_idx % 32;
-	g.dbr_map[block_idx / 32] &= ~mask;
-}
-
-/* -------------------------------------------------------------------------- */
-
-static void dim2_transfer_madr(u32 val)
-{
-	dimcb_io_write(&g.dim2->MADR, val);
-
-	/* wait for transfer completion */
-	while ((dimcb_io_read(&g.dim2->MCTL) & 1) != 1)
-		continue;
-
-	dimcb_io_write(&g.dim2->MCTL, 0);   /* clear transfer complete */
-}
-
-static void dim2_clear_dbr(u16 addr, u16 size)
-{
-	enum { MADR_TB_BIT = 30, MADR_WNR_BIT = 31 };
-
-	u16 const end_addr = addr + size;
-	u32 const cmd = bit_mask(MADR_WNR_BIT) | bit_mask(MADR_TB_BIT);
-
-	dimcb_io_write(&g.dim2->MCTL, 0);   /* clear transfer complete */
-	dimcb_io_write(&g.dim2->MDAT0, 0);
-
-	for (; addr < end_addr; addr++)
-		dim2_transfer_madr(cmd | addr);
-}
-
-static u32 dim2_read_ctr(u32 ctr_addr, u16 mdat_idx)
-{
-	dim2_transfer_madr(ctr_addr);
-
-	return dimcb_io_read((&g.dim2->MDAT0) + mdat_idx);
-}
-
-static void dim2_write_ctr_mask(u32 ctr_addr, const u32 *mask, const u32 *value)
-{
-	enum { MADR_WNR_BIT = 31 };
-
-	dimcb_io_write(&g.dim2->MCTL, 0);   /* clear transfer complete */
-
-	if (mask[0] != 0)
-		dimcb_io_write(&g.dim2->MDAT0, value[0]);
-	if (mask[1] != 0)
-		dimcb_io_write(&g.dim2->MDAT1, value[1]);
-	if (mask[2] != 0)
-		dimcb_io_write(&g.dim2->MDAT2, value[2]);
-	if (mask[3] != 0)
-		dimcb_io_write(&g.dim2->MDAT3, value[3]);
-
-	dimcb_io_write(&g.dim2->MDWE0, mask[0]);
-	dimcb_io_write(&g.dim2->MDWE1, mask[1]);
-	dimcb_io_write(&g.dim2->MDWE2, mask[2]);
-	dimcb_io_write(&g.dim2->MDWE3, mask[3]);
-
-	dim2_transfer_madr(bit_mask(MADR_WNR_BIT) | ctr_addr);
-}
-
-static inline void dim2_write_ctr(u32 ctr_addr, const u32 *value)
-{
-	u32 const mask[4] = { 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF };
-
-	dim2_write_ctr_mask(ctr_addr, mask, value);
-}
-
-static inline void dim2_clear_ctr(u32 ctr_addr)
-{
-	u32 const value[4] = { 0, 0, 0, 0 };
-
-	dim2_write_ctr(ctr_addr, value);
-}
-
-static void dim2_configure_cat(u8 cat_base, u8 ch_addr, u8 ch_type,
-			       bool read_not_write)
-{
-	bool isoc_fce = ch_type == CAT_CT_VAL_ISOC;
-	bool sync_mfe = ch_type == CAT_CT_VAL_SYNC;
-	u16 const cat =
-		(read_not_write << CAT_RNW_BIT) |
-		(ch_type << CAT_CT_SHIFT) |
-		(ch_addr << CAT_CL_SHIFT) |
-		(isoc_fce << CAT_FCE_BIT) |
-		(sync_mfe << CAT_MFE_BIT) |
-		(false << CAT_MT_BIT) |
-		(true << CAT_CE_BIT);
-	u8 const ctr_addr = cat_base + ch_addr / 8;
-	u8 const idx = (ch_addr % 8) / 2;
-	u8 const shift = (ch_addr % 2) * 16;
-	u32 mask[4] = { 0, 0, 0, 0 };
-	u32 value[4] = { 0, 0, 0, 0 };
-
-	mask[idx] = (u32)0xFFFF << shift;
-	value[idx] = cat << shift;
-	dim2_write_ctr_mask(ctr_addr, mask, value);
-}
-
-static void dim2_clear_cat(u8 cat_base, u8 ch_addr)
-{
-	u8 const ctr_addr = cat_base + ch_addr / 8;
-	u8 const idx = (ch_addr % 8) / 2;
-	u8 const shift = (ch_addr % 2) * 16;
-	u32 mask[4] = { 0, 0, 0, 0 };
-	u32 value[4] = { 0, 0, 0, 0 };
-
-	mask[idx] = (u32)0xFFFF << shift;
-	dim2_write_ctr_mask(ctr_addr, mask, value);
-}
-
-static void dim2_configure_cdt(u8 ch_addr, u16 dbr_address, u16 hw_buffer_size,
-			       u16 packet_length)
-{
-	u32 cdt[4] = { 0, 0, 0, 0 };
-
-	if (packet_length)
-		cdt[1] = ((packet_length - 1) << CDT1_BS_ISOC_SHIFT);
-
-	cdt[3] =
-		((hw_buffer_size - 1) << CDT3_BD_SHIFT) |
-		(dbr_address << CDT3_BA_SHIFT);
-	dim2_write_ctr(CDT + ch_addr, cdt);
-}
-
-static u16 dim2_rpc(u8 ch_addr)
-{
-	u32 cdt0 = dim2_read_ctr(CDT + ch_addr, 0);
-
-	return (cdt0 >> CDT0_RPC_SHIFT) & CDT0_RPC_MASK;
-}
-
-static void dim2_clear_cdt(u8 ch_addr)
-{
-	u32 cdt[4] = { 0, 0, 0, 0 };
-
-	dim2_write_ctr(CDT + ch_addr, cdt);
-}
-
-static void dim2_configure_adt(u8 ch_addr)
-{
-	u32 adt[4] = { 0, 0, 0, 0 };
-
-	adt[0] =
-		(true << ADT0_CE_BIT) |
-		(true << ADT0_LE_BIT) |
-		(0 << ADT0_PG_BIT);
-
-	dim2_write_ctr(ADT + ch_addr, adt);
-}
-
-static void dim2_clear_adt(u8 ch_addr)
-{
-	u32 adt[4] = { 0, 0, 0, 0 };
-
-	dim2_write_ctr(ADT + ch_addr, adt);
-}
-
-static void dim2_start_ctrl_async(u8 ch_addr, u8 idx, u32 buf_addr,
-				  u16 buffer_size)
-{
-	u8 const shift = idx * 16;
-
-	u32 mask[4] = { 0, 0, 0, 0 };
-	u32 adt[4] = { 0, 0, 0, 0 };
-
-	mask[1] =
-		bit_mask(ADT1_PS_BIT + shift) |
-		bit_mask(ADT1_RDY_BIT + shift) |
-		(ADT1_CTRL_ASYNC_BD_MASK << (ADT1_BD_SHIFT + shift));
-	adt[1] =
-		(true << (ADT1_PS_BIT + shift)) |
-		(true << (ADT1_RDY_BIT + shift)) |
-		((buffer_size - 1) << (ADT1_BD_SHIFT + shift));
-
-	mask[idx + 2] = 0xFFFFFFFF;
-	adt[idx + 2] = buf_addr;
-
-	dim2_write_ctr_mask(ADT + ch_addr, mask, adt);
-}
-
-static void dim2_start_isoc_sync(u8 ch_addr, u8 idx, u32 buf_addr,
-				 u16 buffer_size)
-{
-	u8 const shift = idx * 16;
-
-	u32 mask[4] = { 0, 0, 0, 0 };
-	u32 adt[4] = { 0, 0, 0, 0 };
-
-	mask[1] =
-		bit_mask(ADT1_RDY_BIT + shift) |
-		(ADT1_ISOC_SYNC_BD_MASK << (ADT1_BD_SHIFT + shift));
-	adt[1] =
-		(true << (ADT1_RDY_BIT + shift)) |
-		((buffer_size - 1) << (ADT1_BD_SHIFT + shift));
-
-	mask[idx + 2] = 0xFFFFFFFF;
-	adt[idx + 2] = buf_addr;
-
-	dim2_write_ctr_mask(ADT + ch_addr, mask, adt);
-}
-
-static void dim2_clear_ctram(void)
-{
-	u32 ctr_addr;
-
-	for (ctr_addr = 0; ctr_addr < 0x90; ctr_addr++)
-		dim2_clear_ctr(ctr_addr);
-}
-
-static void dim2_configure_channel(
-	u8 ch_addr, u8 type, u8 is_tx, u16 dbr_address, u16 hw_buffer_size,
-	u16 packet_length)
-{
-	dim2_configure_cdt(ch_addr, dbr_address, hw_buffer_size, packet_length);
-	dim2_configure_cat(MLB_CAT, ch_addr, type, is_tx ? 1 : 0);
-
-	dim2_configure_adt(ch_addr);
-	dim2_configure_cat(AHB_CAT, ch_addr, type, is_tx ? 0 : 1);
-
-	/* unmask interrupt for used channel, enable mlb_sys_int[0] interrupt */
-	dimcb_io_write(&g.dim2->ACMR0,
-		       dimcb_io_read(&g.dim2->ACMR0) | bit_mask(ch_addr));
-}
-
-static void dim2_clear_channel(u8 ch_addr)
-{
-	/* mask interrupt for used channel, disable mlb_sys_int[0] interrupt */
-	dimcb_io_write(&g.dim2->ACMR0,
-		       dimcb_io_read(&g.dim2->ACMR0) & ~bit_mask(ch_addr));
-
-	dim2_clear_cat(AHB_CAT, ch_addr);
-	dim2_clear_adt(ch_addr);
-
-	dim2_clear_cat(MLB_CAT, ch_addr);
-	dim2_clear_cdt(ch_addr);
-
-	/* clear channel status bit */
-	dimcb_io_write(&g.dim2->ACSR0, bit_mask(ch_addr));
-}
-
-/* -------------------------------------------------------------------------- */
-/* trace async tx dbr fill state */
-
-static inline u16 norm_pc(u16 pc)
-{
-	return pc & CDT0_RPC_MASK;
-}
-
-static void dbrcnt_init(u8 ch_addr, u16 dbr_size)
-{
-	g.atx_dbr.rest_size = dbr_size;
-	g.atx_dbr.rpc = dim2_rpc(ch_addr);
-	g.atx_dbr.wpc = g.atx_dbr.rpc;
-}
-
-static void dbrcnt_enq(int buf_sz)
-{
-	g.atx_dbr.rest_size -= buf_sz;
-	g.atx_dbr.sz_queue[norm_pc(g.atx_dbr.wpc)] = buf_sz;
-	g.atx_dbr.wpc++;
-}
-
-u16 dim_dbr_space(struct dim_channel *ch)
-{
-	u16 cur_rpc;
-	struct async_tx_dbr *dbr = &g.atx_dbr;
-
-	if (ch->addr != dbr->ch_addr)
-		return 0xFFFF;
-
-	cur_rpc = dim2_rpc(ch->addr);
-
-	while (norm_pc(dbr->rpc) != cur_rpc) {
-		dbr->rest_size += dbr->sz_queue[norm_pc(dbr->rpc)];
-		dbr->rpc++;
-	}
-
-	if ((u16)(dbr->wpc - dbr->rpc) >= CDT0_RPC_MASK)
-		return 0;
-
-	return dbr->rest_size;
-}
-
-/* -------------------------------------------------------------------------- */
-/* channel state helpers */
-
-static void state_init(struct int_ch_state *state)
-{
-	state->request_counter = 0;
-	state->service_counter = 0;
-
-	state->idx1 = 0;
-	state->idx2 = 0;
-	state->level = 0;
-}
-
-/* -------------------------------------------------------------------------- */
-/* macro helper functions */
-
-static inline bool check_channel_address(u32 ch_address)
-{
-	return ch_address > 0 && (ch_address % 2) == 0 &&
-	       (ch_address / 2) <= (u32)CAT_CL_MASK;
-}
-
-static inline bool check_packet_length(u32 packet_length)
-{
-	u16 const max_size = ((u16)CDT3_BD_ISOC_MASK + 1u) / ISOC_DBR_FACTOR;
-
-	if (packet_length <= 0)
-		return false; /* too small */
-
-	if (packet_length > max_size)
-		return false; /* too big */
-
-	if (packet_length - 1u > (u32)CDT1_BS_ISOC_MASK)
-		return false; /* too big */
-
-	return true;
-}
-
-static inline bool check_bytes_per_frame(u32 bytes_per_frame)
-{
-	u16 const bd_factor = g.fcnt + 2;
-	u16 const max_size = ((u16)CDT3_BD_MASK + 1u) >> bd_factor;
-
-	if (bytes_per_frame <= 0)
-		return false; /* too small */
-
-	if (bytes_per_frame > max_size)
-		return false; /* too big */
-
-	return true;
-}
-
-static inline u16 norm_ctrl_async_buffer_size(u16 buf_size)
-{
-	u16 const max_size = (u16)ADT1_CTRL_ASYNC_BD_MASK + 1u;
-
-	if (buf_size > max_size)
-		return max_size;
-
-	return buf_size;
-}
-
-static inline u16 norm_isoc_buffer_size(u16 buf_size, u16 packet_length)
-{
-	u16 n;
-	u16 const max_size = (u16)ADT1_ISOC_SYNC_BD_MASK + 1u;
-
-	if (buf_size > max_size)
-		buf_size = max_size;
-
-	n = buf_size / packet_length;
-
-	if (n < 2u)
-		return 0; /* too small buffer for given packet_length */
-
-	return packet_length * n;
-}
-
-static inline u16 norm_sync_buffer_size(u16 buf_size, u16 bytes_per_frame)
-{
-	u16 n;
-	u16 const max_size = (u16)ADT1_ISOC_SYNC_BD_MASK + 1u;
-	u32 const unit = bytes_per_frame << g.fcnt;
-
-	if (buf_size > max_size)
-		buf_size = max_size;
-
-	n = buf_size / unit;
-
-	if (n < 1u)
-		return 0; /* too small buffer for given bytes_per_frame */
-
-	return unit * n;
-}
-
-static void dim2_cleanup(void)
-{
-	/* disable MediaLB */
-	dimcb_io_write(&g.dim2->MLBC0, false << MLBC0_MLBEN_BIT);
-
-	dim2_clear_ctram();
-
-	/* disable mlb_int interrupt */
-	dimcb_io_write(&g.dim2->MIEN, 0);
-
-	/* clear status for all dma channels */
-	dimcb_io_write(&g.dim2->ACSR0, 0xFFFFFFFF);
-	dimcb_io_write(&g.dim2->ACSR1, 0xFFFFFFFF);
-
-	/* mask interrupts for all channels */
-	dimcb_io_write(&g.dim2->ACMR0, 0);
-	dimcb_io_write(&g.dim2->ACMR1, 0);
-}
-
-static void dim2_initialize(bool enable_6pin, u8 mlb_clock)
-{
-	dim2_cleanup();
-
-	/* configure and enable MediaLB */
-	dimcb_io_write(&g.dim2->MLBC0,
-		       enable_6pin << MLBC0_MLBPEN_BIT |
-		       mlb_clock << MLBC0_MLBCLK_SHIFT |
-		       g.fcnt << MLBC0_FCNT_SHIFT |
-		       true << MLBC0_MLBEN_BIT);
-
-	/* activate all HBI channels */
-	dimcb_io_write(&g.dim2->HCMR0, 0xFFFFFFFF);
-	dimcb_io_write(&g.dim2->HCMR1, 0xFFFFFFFF);
-
-	/* enable HBI */
-	dimcb_io_write(&g.dim2->HCTL, bit_mask(HCTL_EN_BIT));
-
-	/* configure DMA */
-	dimcb_io_write(&g.dim2->ACTL,
-		       ACTL_DMA_MODE_VAL_DMA_MODE_1 << ACTL_DMA_MODE_BIT |
-		       true << ACTL_SCE_BIT);
-}
-
-static bool dim2_is_mlb_locked(void)
-{
-	u32 const mask0 = bit_mask(MLBC0_MLBLK_BIT);
-	u32 const mask1 = bit_mask(MLBC1_CLKMERR_BIT) |
-			  bit_mask(MLBC1_LOCKERR_BIT);
-	u32 const c1 = dimcb_io_read(&g.dim2->MLBC1);
-	u32 const nda_mask = (u32)MLBC1_NDA_MASK << MLBC1_NDA_SHIFT;
-
-	dimcb_io_write(&g.dim2->MLBC1, c1 & nda_mask);
-	return (dimcb_io_read(&g.dim2->MLBC1) & mask1) == 0 &&
-	       (dimcb_io_read(&g.dim2->MLBC0) & mask0) != 0;
-}
-
-/* -------------------------------------------------------------------------- */
-/* channel help routines */
-
-static inline bool service_channel(u8 ch_addr, u8 idx)
-{
-	u8 const shift = idx * 16;
-	u32 const adt1 = dim2_read_ctr(ADT + ch_addr, 1);
-	u32 mask[4] = { 0, 0, 0, 0 };
-	u32 adt_w[4] = { 0, 0, 0, 0 };
-
-	if (((adt1 >> (ADT1_DNE_BIT + shift)) & 1) == 0)
-		return false;
-
-	mask[1] =
-		bit_mask(ADT1_DNE_BIT + shift) |
-		bit_mask(ADT1_ERR_BIT + shift) |
-		bit_mask(ADT1_RDY_BIT + shift);
-	dim2_write_ctr_mask(ADT + ch_addr, mask, adt_w);
-
-	/* clear channel status bit */
-	dimcb_io_write(&g.dim2->ACSR0, bit_mask(ch_addr));
-
-	return true;
-}
-
-/* -------------------------------------------------------------------------- */
-/* channel init routines */
-
-static void isoc_init(struct dim_channel *ch, u8 ch_addr, u16 packet_length)
-{
-	state_init(&ch->state);
-
-	ch->addr = ch_addr;
-
-	ch->packet_length = packet_length;
-	ch->bytes_per_frame = 0;
-	ch->done_sw_buffers_number = 0;
-}
-
-static void sync_init(struct dim_channel *ch, u8 ch_addr, u16 bytes_per_frame)
-{
-	state_init(&ch->state);
-
-	ch->addr = ch_addr;
-
-	ch->packet_length = 0;
-	ch->bytes_per_frame = bytes_per_frame;
-	ch->done_sw_buffers_number = 0;
-}
-
-static void channel_init(struct dim_channel *ch, u8 ch_addr)
-{
-	state_init(&ch->state);
-
-	ch->addr = ch_addr;
-
-	ch->packet_length = 0;
-	ch->bytes_per_frame = 0;
-	ch->done_sw_buffers_number = 0;
-}
-
-/* returns true if channel interrupt state is cleared */
-static bool channel_service_interrupt(struct dim_channel *ch)
-{
-	struct int_ch_state *const state = &ch->state;
-
-	if (!service_channel(ch->addr, state->idx2))
-		return false;
-
-	state->idx2 ^= 1;
-	state->request_counter++;
-	return true;
-}
-
-static bool channel_start(struct dim_channel *ch, u32 buf_addr, u16 buf_size)
-{
-	struct int_ch_state *const state = &ch->state;
-
-	if (buf_size <= 0)
-		return dim_on_error(DIM_ERR_BAD_BUFFER_SIZE, "Bad buffer size");
-
-	if (ch->packet_length == 0 && ch->bytes_per_frame == 0 &&
-	    buf_size != norm_ctrl_async_buffer_size(buf_size))
-		return dim_on_error(DIM_ERR_BAD_BUFFER_SIZE,
-				    "Bad control/async buffer size");
-
-	if (ch->packet_length &&
-	    buf_size != norm_isoc_buffer_size(buf_size, ch->packet_length))
-		return dim_on_error(DIM_ERR_BAD_BUFFER_SIZE,
-				    "Bad isochronous buffer size");
-
-	if (ch->bytes_per_frame &&
-	    buf_size != norm_sync_buffer_size(buf_size, ch->bytes_per_frame))
-		return dim_on_error(DIM_ERR_BAD_BUFFER_SIZE,
-				    "Bad synchronous buffer size");
-
-	if (state->level >= 2u)
-		return dim_on_error(DIM_ERR_OVERFLOW, "Channel overflow");
-
-	++state->level;
-
-	if (ch->addr == g.atx_dbr.ch_addr)
-		dbrcnt_enq(buf_size);
-
-	if (ch->packet_length || ch->bytes_per_frame)
-		dim2_start_isoc_sync(ch->addr, state->idx1, buf_addr, buf_size);
-	else
-		dim2_start_ctrl_async(ch->addr, state->idx1, buf_addr,
-				      buf_size);
-	state->idx1 ^= 1;
-
-	return true;
-}
-
-static u8 channel_service(struct dim_channel *ch)
-{
-	struct int_ch_state *const state = &ch->state;
-
-	if (state->service_counter != state->request_counter) {
-		state->service_counter++;
-		if (state->level == 0)
-			return DIM_ERR_UNDERFLOW;
-
-		--state->level;
-		ch->done_sw_buffers_number++;
-	}
-
-	return DIM_NO_ERROR;
-}
-
-static bool channel_detach_buffers(struct dim_channel *ch, u16 buffers_number)
-{
-	if (buffers_number > ch->done_sw_buffers_number)
-		return dim_on_error(DIM_ERR_UNDERFLOW, "Channel underflow");
-
-	ch->done_sw_buffers_number -= buffers_number;
-	return true;
-}
-
-/* -------------------------------------------------------------------------- */
-/* API */
-
-u8 dim_startup(struct dim2_regs __iomem *dim_base_address, u32 mlb_clock,
-	       u32 fcnt)
-{
-	g.dim_is_initialized = false;
-
-	if (!dim_base_address)
-		return DIM_INIT_ERR_DIM_ADDR;
-
-	/* MediaLB clock: 0 - 256 fs, 1 - 512 fs, 2 - 1024 fs, 3 - 2048 fs */
-	/* MediaLB clock: 4 - 3072 fs, 5 - 4096 fs, 6 - 6144 fs, 7 - 8192 fs */
-	if (mlb_clock >= 8)
-		return DIM_INIT_ERR_MLB_CLOCK;
-
-	if (fcnt > MLBC0_FCNT_MAX_VAL)
-		return DIM_INIT_ERR_MLB_CLOCK;
-
-	g.dim2 = dim_base_address;
-	g.fcnt = fcnt;
-	g.dbr_map[0] = 0;
-	g.dbr_map[1] = 0;
-
-	dim2_initialize(mlb_clock >= 3, mlb_clock);
-
-	g.dim_is_initialized = true;
-
-	return DIM_NO_ERROR;
-}
-
-void dim_shutdown(void)
-{
-	g.dim_is_initialized = false;
-	dim2_cleanup();
-}
-
-bool dim_get_lock_state(void)
-{
-	return dim2_is_mlb_locked();
-}
-
-static u8 init_ctrl_async(struct dim_channel *ch, u8 type, u8 is_tx,
-			  u16 ch_address, u16 hw_buffer_size)
-{
-	if (!g.dim_is_initialized || !ch)
-		return DIM_ERR_DRIVER_NOT_INITIALIZED;
-
-	if (!check_channel_address(ch_address))
-		return DIM_INIT_ERR_CHANNEL_ADDRESS;
-
-	ch->dbr_size = ROUND_UP_TO(hw_buffer_size, DBR_BLOCK_SIZE);
-	ch->dbr_addr = alloc_dbr(ch->dbr_size);
-	if (ch->dbr_addr >= DBR_SIZE)
-		return DIM_INIT_ERR_OUT_OF_MEMORY;
-
-	channel_init(ch, ch_address / 2);
-
-	dim2_configure_channel(ch->addr, type, is_tx,
-			       ch->dbr_addr, ch->dbr_size, 0);
-
-	return DIM_NO_ERROR;
-}
-
-void dim_service_mlb_int_irq(void)
-{
-	dimcb_io_write(&g.dim2->MS0, 0);
-	dimcb_io_write(&g.dim2->MS1, 0);
-}
-
-u16 dim_norm_ctrl_async_buffer_size(u16 buf_size)
-{
-	return norm_ctrl_async_buffer_size(buf_size);
-}
-
-/**
- * Retrieves maximal possible correct buffer size for isochronous data type
- * conform to given packet length and not bigger than given buffer size.
- *
- * Returns non-zero correct buffer size or zero by error.
- */
-u16 dim_norm_isoc_buffer_size(u16 buf_size, u16 packet_length)
-{
-	if (!check_packet_length(packet_length))
-		return 0;
-
-	return norm_isoc_buffer_size(buf_size, packet_length);
-}
-
-/**
- * Retrieves maximal possible correct buffer size for synchronous data type
- * conform to given bytes per frame and not bigger than given buffer size.
- *
- * Returns non-zero correct buffer size or zero by error.
- */
-u16 dim_norm_sync_buffer_size(u16 buf_size, u16 bytes_per_frame)
-{
-	if (!check_bytes_per_frame(bytes_per_frame))
-		return 0;
-
-	return norm_sync_buffer_size(buf_size, bytes_per_frame);
-}
-
-u8 dim_init_control(struct dim_channel *ch, u8 is_tx, u16 ch_address,
-		    u16 max_buffer_size)
-{
-	return init_ctrl_async(ch, CAT_CT_VAL_CONTROL, is_tx, ch_address,
-			       max_buffer_size);
-}
-
-u8 dim_init_async(struct dim_channel *ch, u8 is_tx, u16 ch_address,
-		  u16 max_buffer_size)
-{
-	u8 ret = init_ctrl_async(ch, CAT_CT_VAL_ASYNC, is_tx, ch_address,
-				 max_buffer_size);
-
-	if (is_tx && !g.atx_dbr.ch_addr) {
-		g.atx_dbr.ch_addr = ch->addr;
-		dbrcnt_init(ch->addr, ch->dbr_size);
-		dimcb_io_write(&g.dim2->MIEN, bit_mask(20));
-	}
-
-	return ret;
-}
-
-u8 dim_init_isoc(struct dim_channel *ch, u8 is_tx, u16 ch_address,
-		 u16 packet_length)
-{
-	if (!g.dim_is_initialized || !ch)
-		return DIM_ERR_DRIVER_NOT_INITIALIZED;
-
-	if (!check_channel_address(ch_address))
-		return DIM_INIT_ERR_CHANNEL_ADDRESS;
-
-	if (!check_packet_length(packet_length))
-		return DIM_ERR_BAD_CONFIG;
-
-	ch->dbr_size = packet_length * ISOC_DBR_FACTOR;
-	ch->dbr_addr = alloc_dbr(ch->dbr_size);
-	if (ch->dbr_addr >= DBR_SIZE)
-		return DIM_INIT_ERR_OUT_OF_MEMORY;
-
-	isoc_init(ch, ch_address / 2, packet_length);
-
-	dim2_configure_channel(ch->addr, CAT_CT_VAL_ISOC, is_tx, ch->dbr_addr,
-			       ch->dbr_size, packet_length);
-
-	return DIM_NO_ERROR;
-}
-
-u8 dim_init_sync(struct dim_channel *ch, u8 is_tx, u16 ch_address,
-		 u16 bytes_per_frame)
-{
-	u16 bd_factor = g.fcnt + 2;
-
-	if (!g.dim_is_initialized || !ch)
-		return DIM_ERR_DRIVER_NOT_INITIALIZED;
-
-	if (!check_channel_address(ch_address))
-		return DIM_INIT_ERR_CHANNEL_ADDRESS;
-
-	if (!check_bytes_per_frame(bytes_per_frame))
-		return DIM_ERR_BAD_CONFIG;
-
-	ch->dbr_size = bytes_per_frame << bd_factor;
-	ch->dbr_addr = alloc_dbr(ch->dbr_size);
-	if (ch->dbr_addr >= DBR_SIZE)
-		return DIM_INIT_ERR_OUT_OF_MEMORY;
-
-	sync_init(ch, ch_address / 2, bytes_per_frame);
-
-	dim2_clear_dbr(ch->dbr_addr, ch->dbr_size);
-	dim2_configure_channel(ch->addr, CAT_CT_VAL_SYNC, is_tx,
-			       ch->dbr_addr, ch->dbr_size, 0);
-
-	return DIM_NO_ERROR;
-}
-
-u8 dim_destroy_channel(struct dim_channel *ch)
-{
-	if (!g.dim_is_initialized || !ch)
-		return DIM_ERR_DRIVER_NOT_INITIALIZED;
-
-	if (ch->addr == g.atx_dbr.ch_addr) {
-		dimcb_io_write(&g.dim2->MIEN, 0);
-		g.atx_dbr.ch_addr = 0;
-	}
-
-	dim2_clear_channel(ch->addr);
-	if (ch->dbr_addr < DBR_SIZE)
-		free_dbr(ch->dbr_addr, ch->dbr_size);
-	ch->dbr_addr = DBR_SIZE;
-
-	return DIM_NO_ERROR;
-}
-
-void dim_service_ahb_int_irq(struct dim_channel *const *channels)
-{
-	bool state_changed;
-
-	if (!g.dim_is_initialized) {
-		dim_on_error(DIM_ERR_DRIVER_NOT_INITIALIZED,
-			     "DIM is not initialized");
-		return;
-	}
-
-	if (!channels) {
-		dim_on_error(DIM_ERR_DRIVER_NOT_INITIALIZED, "Bad channels");
-		return;
-	}
-
-	/*
-	 * Use while-loop and a flag to make sure the age is changed back at
-	 * least once, otherwise the interrupt may never come if CPU generates
-	 * interrupt on changing age.
-	 * This cycle runs not more than number of channels, because
-	 * channel_service_interrupt() routine doesn't start the channel again.
-	 */
-	do {
-		struct dim_channel *const *ch = channels;
-
-		state_changed = false;
-
-		while (*ch) {
-			state_changed |= channel_service_interrupt(*ch);
-			++ch;
-		}
-	} while (state_changed);
-}
-
-u8 dim_service_channel(struct dim_channel *ch)
-{
-	if (!g.dim_is_initialized || !ch)
-		return DIM_ERR_DRIVER_NOT_INITIALIZED;
-
-	return channel_service(ch);
-}
-
-struct dim_ch_state_t *dim_get_channel_state(struct dim_channel *ch,
-					     struct dim_ch_state_t *state_ptr)
-{
-	if (!ch || !state_ptr)
-		return NULL;
-
-	state_ptr->ready = ch->state.level < 2;
-	state_ptr->done_buffers = ch->done_sw_buffers_number;
-
-	return state_ptr;
-}
-
-bool dim_enqueue_buffer(struct dim_channel *ch, u32 buffer_addr,
-			u16 buffer_size)
-{
-	if (!ch)
-		return dim_on_error(DIM_ERR_DRIVER_NOT_INITIALIZED,
-				    "Bad channel");
-
-	return channel_start(ch, buffer_addr, buffer_size);
-}
-
-bool dim_detach_buffers(struct dim_channel *ch, u16 buffers_number)
-{
-	if (!ch)
-		return dim_on_error(DIM_ERR_DRIVER_NOT_INITIALIZED,
-				    "Bad channel");
-
-	return channel_detach_buffers(ch, buffers_number);
-}
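
For context, the DBR allocator being removed here (alloc_dbr()/free_dbr()) is a small bitmap allocator: the 16 KiB DBR is divided into 32 * DBR_MAP_SIZE blocks of DBR_BLOCK_SIZE bytes, and every allocation claims a contiguous run of bits inside one map word. A simplified, self-contained variant of the same idea is sketched below; it is illustrative only (plain first-fit, without the power-of-two stride used by the driver).

#include <stdint.h>

#define SKETCH_BLOCK_SIZE 256u	/* 16 KiB / 32 / DBR_MAP_SIZE for DBR_MAP_SIZE == 2 */
#define SKETCH_MAP_WORDS  2u

static uint32_t sketch_map[SKETCH_MAP_WORDS];

/* Return a byte offset into the DBR, or -1 if no free run of blocks is left. */
static int sketch_alloc_dbr(unsigned int size)
{
	unsigned int blocks = (size + SKETCH_BLOCK_SIZE - 1) / SKETCH_BLOCK_SIZE;
	unsigned int w, bit;

	if (!blocks || blocks > 32)
		return -1;

	for (w = 0; w < SKETCH_MAP_WORDS; w++) {
		uint32_t mask = blocks == 32 ? ~0u : (1u << blocks) - 1;

		for (bit = 0; bit + blocks <= 32; bit++, mask <<= 1) {
			if (!(sketch_map[w] & mask)) {
				sketch_map[w] |= mask;
				return (w * 32 + bit) * SKETCH_BLOCK_SIZE;
			}
		}
	}
	return -1;	/* out of DBR memory */
}
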
diff --git a/drivers/staging/most/hdm-dim2/dim2_hal.h b/drivers/staging/most/hdm-dim2/dim2_hal.h
deleted file mode 100644
index 6df6ea5..0000000
--- a/drivers/staging/most/hdm-dim2/dim2_hal.h
+++ /dev/null
@@ -1,112 +0,0 @@
-/*
- * dim2_hal.h - DIM2 HAL interface
- * (MediaLB, Device Interface Macro IP, OS62420)
- *
- * Copyright (C) 2015, Microchip Technology Germany II GmbH & Co. KG
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * This file is licensed under GPLv2.
- */
-
-#ifndef _DIM2_HAL_H
-#define _DIM2_HAL_H
-
-#include <linux/types.h>
-#include "dim2_reg.h"
-
-/*
- * The values below are specified in the hardware specification.
- * So, they should not be changed until the hardware specification changes.
- */
-enum mlb_clk_speed {
-	CLK_256FS = 0,
-	CLK_512FS = 1,
-	CLK_1024FS = 2,
-	CLK_2048FS = 3,
-	CLK_3072FS = 4,
-	CLK_4096FS = 5,
-	CLK_6144FS = 6,
-	CLK_8192FS = 7,
-};
-
-struct dim_ch_state_t {
-	bool ready; /* Shows readiness to enqueue next buffer */
-	u16 done_buffers; /* Number of completed buffers */
-};
-
-struct int_ch_state {
-	/* changed only in interrupt context */
-	volatile int request_counter;
-
-	/* changed only in task context */
-	volatile int service_counter;
-
-	u8 idx1;
-	u8 idx2;
-	u8 level; /* [0..2], buffering level */
-};
-
-struct dim_channel {
-	struct int_ch_state state;
-	u8 addr;
-	u16 dbr_addr;
-	u16 dbr_size;
-	u16 packet_length; /*< Isochronous packet length in bytes. */
-	u16 bytes_per_frame; /*< Synchronous bytes per frame. */
-	u16 done_sw_buffers_number; /*< Done software buffers number. */
-};
-
-u8 dim_startup(struct dim2_regs __iomem *dim_base_address, u32 mlb_clock,
-	       u32 fcnt);
-
-void dim_shutdown(void);
-
-bool dim_get_lock_state(void);
-
-u16 dim_norm_ctrl_async_buffer_size(u16 buf_size);
-
-u16 dim_norm_isoc_buffer_size(u16 buf_size, u16 packet_length);
-
-u16 dim_norm_sync_buffer_size(u16 buf_size, u16 bytes_per_frame);
-
-u8 dim_init_control(struct dim_channel *ch, u8 is_tx, u16 ch_address,
-		    u16 max_buffer_size);
-
-u8 dim_init_async(struct dim_channel *ch, u8 is_tx, u16 ch_address,
-		  u16 max_buffer_size);
-
-u8 dim_init_isoc(struct dim_channel *ch, u8 is_tx, u16 ch_address,
-		 u16 packet_length);
-
-u8 dim_init_sync(struct dim_channel *ch, u8 is_tx, u16 ch_address,
-		 u16 bytes_per_frame);
-
-u8 dim_destroy_channel(struct dim_channel *ch);
-
-void dim_service_mlb_int_irq(void);
-
-void dim_service_ahb_int_irq(struct dim_channel *const *channels);
-
-u8 dim_service_channel(struct dim_channel *ch);
-
-struct dim_ch_state_t *dim_get_channel_state(struct dim_channel *ch,
-					     struct dim_ch_state_t *state_ptr);
-
-u16 dim_dbr_space(struct dim_channel *ch);
-
-bool dim_enqueue_buffer(struct dim_channel *ch, u32 buffer_addr,
-			u16 buffer_size);
-
-bool dim_detach_buffers(struct dim_channel *ch, u16 buffers_number);
-
-u32 dimcb_io_read(u32 __iomem *ptr32);
-
-void dimcb_io_write(u32 __iomem *ptr32, u32 value);
-
-void dimcb_on_error(u8 error_id, const char *error_message);
-
-#endif /* _DIM2_HAL_H */
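
As background for the sizing helpers declared above: synchronous channels are handled in units of bytes_per_frame << fcnt (frames per sub-buffer), so dim_norm_sync_buffer_size() rounds the requested buffer size down to a whole number of such units and returns 0 if not even one unit fits. A sketch of that rounding, assuming the 13-bit ADT buffer-depth limit from the HAL:

#include <stdint.h>

#define SKETCH_SYNC_BD_MAX 0x1FFFu	/* 13-bit ADT buffer-depth field */

/* Round a requested sync buffer size down to whole (bytes_per_frame << fcnt) units. */
static uint16_t sketch_norm_sync_size(uint16_t buf_size, uint16_t bytes_per_frame, uint8_t fcnt)
{
	uint32_t unit = (uint32_t)bytes_per_frame << fcnt;
	uint16_t max_size = SKETCH_SYNC_BD_MAX + 1u;	/* 8 KiB */

	if (buf_size > max_size)
		buf_size = max_size;

	if (!unit || buf_size < unit)
		return 0;	/* too small for a single unit */

	return (uint16_t)(buf_size / unit * unit);
}
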
diff --git a/drivers/staging/most/hdm-dim2/dim2_hdm.c b/drivers/staging/most/hdm-dim2/dim2_hdm.c
deleted file mode 100644
index df7021c..0000000
--- a/drivers/staging/most/hdm-dim2/dim2_hdm.c
+++ /dev/null
@@ -1,918 +0,0 @@
-/*
- * dim2_hdm.c - MediaLB DIM2 Hardware Dependent Module
- *
- * Copyright (C) 2015-2016, Microchip Technology Germany II GmbH & Co. KG
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * This file is licensed under GPLv2.
- */
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/module.h>
-#include <linux/printk.h>
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/platform_device.h>
-#include <linux/interrupt.h>
-#include <linux/slab.h>
-#include <linux/io.h>
-#include <linux/dma-mapping.h>
-#include <linux/sched.h>
-#include <linux/kthread.h>
-
-#include <mostcore.h>
-#include "dim2_hal.h"
-#include "dim2_hdm.h"
-#include "dim2_errors.h"
-#include "dim2_sysfs.h"
-
-#define DMA_CHANNELS (32 - 1)  /* channel 0 is a system channel */
-
-#define MAX_BUFFERS_PACKET      32
-#define MAX_BUFFERS_STREAMING   32
-#define MAX_BUF_SIZE_PACKET     2048
-#define MAX_BUF_SIZE_STREAMING  (8 * 1024)
-
-/* command line parameter to select clock speed */
-static char *clock_speed;
-module_param(clock_speed, charp, 0000);
-MODULE_PARM_DESC(clock_speed, "MediaLB Clock Speed");
-
-/*
- * The parameter representing the number of frames per sub-buffer for
- * synchronous channels.  Valid values: [0 .. 6].
- *
- * The values 0, 1, 2, 3, 4, 5, 6 represent corresponding number of frames per
- * sub-buffer 1, 2, 4, 8, 16, 32, 64.
- */
-static u8 fcnt = 4;  /* (1 << fcnt) frames per subbuffer */
-module_param(fcnt, byte, 0000);
-MODULE_PARM_DESC(fcnt, "Num of frames per sub-buffer for sync channels as a power of 2");
-
-static DEFINE_SPINLOCK(dim_lock);
-
-static void dim2_tasklet_fn(unsigned long data);
-static DECLARE_TASKLET(dim2_tasklet, dim2_tasklet_fn, 0);
-
-/**
- * struct hdm_channel - private structure to keep channel specific data
- * @is_initialized: identifier to know whether the channel is initialized
- * @ch: HAL specific channel data
- * @pending_list: list to keep MBO's before starting transfer
- * @started_list: list to keep MBO's after starting transfer
- * @direction: channel direction (TX or RX)
- * @data_type: channel data type
- */
-struct hdm_channel {
-	char name[sizeof "caNNN"];
-	bool is_initialized;
-	struct dim_channel ch;
-	struct list_head pending_list;	/* before dim_enqueue_buffer() */
-	struct list_head started_list;	/* after dim_enqueue_buffer() */
-	enum most_channel_direction direction;
-	enum most_channel_data_type data_type;
-};
-
-/**
- * struct dim2_hdm - private structure to keep interface specific data
- * @hch: an array of channel specific data
- * @most_iface: most interface structure
- * @capabilities: an array of channel capability data
- * @io_base: I/O register base address
- * @clk_speed: user selectable (through command line parameter) clock speed
- * @netinfo_task: thread to deliver network status
- * @netinfo_waitq: waitq for the thread to sleep
- * @deliver_netinfo: to identify whether network status received
- * @mac_addrs: INIC mac address
- * @link_state: network link state
- * @atx_idx: index of async tx channel
- */
-struct dim2_hdm {
-	struct hdm_channel hch[DMA_CHANNELS];
-	struct most_channel_capability capabilities[DMA_CHANNELS];
-	struct most_interface most_iface;
-	char name[16 + sizeof "dim2-"];
-	void __iomem *io_base;
-	int clk_speed;
-	struct task_struct *netinfo_task;
-	wait_queue_head_t netinfo_waitq;
-	int deliver_netinfo;
-	unsigned char mac_addrs[6];
-	unsigned char link_state;
-	int atx_idx;
-	struct medialb_bus bus;
-	void (*on_netinfo)(struct most_interface *,
-			   unsigned char, unsigned char *);
-};
-
-#define iface_to_hdm(iface) container_of(iface, struct dim2_hdm, most_iface)
-
-/* Macro to identify a network status message */
-#define PACKET_IS_NET_INFO(p)  \
-	(((p)[1] == 0x18) && ((p)[2] == 0x05) && ((p)[3] == 0x0C) && \
-	 ((p)[13] == 0x3C) && ((p)[14] == 0x00) && ((p)[15] == 0x0A))
-
-bool dim2_sysfs_get_state_cb(void)
-{
-	bool state;
-	unsigned long flags;
-
-	spin_lock_irqsave(&dim_lock, flags);
-	state = dim_get_lock_state();
-	spin_unlock_irqrestore(&dim_lock, flags);
-
-	return state;
-}
-
-/**
- * dimcb_io_read - callback from HAL to read an I/O register
- * @ptr32: register address
- */
-u32 dimcb_io_read(u32 __iomem *ptr32)
-{
-	return readl(ptr32);
-}
-
-/**
- * dimcb_io_write - callback from HAL to write value to an I/O register
- * @ptr32: register address
- * @value: value to write
- */
-void dimcb_io_write(u32 __iomem *ptr32, u32 value)
-{
-	writel(value, ptr32);
-}
-
-/**
- * dimcb_on_error - callback from HAL to report miscommunication between
- * HDM and HAL
- * @error_id: Error ID
- * @error_message: Error message. Some text in a free format
- */
-void dimcb_on_error(u8 error_id, const char *error_message)
-{
-	pr_err("dimcb_on_error: error_id - %d, error_message - %s\n", error_id,
-	       error_message);
-}
-
-/**
- * startup_dim - initialize the dim2 interface
- * @pdev: platform device
- *
- * Get the value of command line parameter "clock_speed" if given or use the
- * default value, enable the clock and PLL, and initialize the dim2 interface.
- */
-static int startup_dim(struct platform_device *pdev)
-{
-	struct dim2_hdm *dev = platform_get_drvdata(pdev);
-	struct dim2_platform_data *pdata = pdev->dev.platform_data;
-	u8 hal_ret;
-
-	dev->clk_speed = -1;
-
-	if (clock_speed) {
-		if (!strcmp(clock_speed, "256fs"))
-			dev->clk_speed = CLK_256FS;
-		else if (!strcmp(clock_speed, "512fs"))
-			dev->clk_speed = CLK_512FS;
-		else if (!strcmp(clock_speed, "1024fs"))
-			dev->clk_speed = CLK_1024FS;
-		else if (!strcmp(clock_speed, "2048fs"))
-			dev->clk_speed = CLK_2048FS;
-		else if (!strcmp(clock_speed, "3072fs"))
-			dev->clk_speed = CLK_3072FS;
-		else if (!strcmp(clock_speed, "4096fs"))
-			dev->clk_speed = CLK_4096FS;
-		else if (!strcmp(clock_speed, "6144fs"))
-			dev->clk_speed = CLK_6144FS;
-		else if (!strcmp(clock_speed, "8192fs"))
-			dev->clk_speed = CLK_8192FS;
-	}
-
-	if (dev->clk_speed == -1) {
-		pr_info("Bad or missing clock speed parameter, using default value: 3072fs\n");
-		dev->clk_speed = CLK_3072FS;
-	} else {
-		pr_info("Selected clock speed: %s\n", clock_speed);
-	}
-	if (pdata && pdata->init) {
-		int ret = pdata->init(pdata, dev->io_base, dev->clk_speed);
-
-		if (ret)
-			return ret;
-	}
-
-	pr_info("sync: num of frames per sub-buffer: %u\n", fcnt);
-	hal_ret = dim_startup(dev->io_base, dev->clk_speed, fcnt);
-	if (hal_ret != DIM_NO_ERROR) {
-		pr_err("dim_startup failed: %d\n", hal_ret);
-		if (pdata && pdata->destroy)
-			pdata->destroy(pdata);
-		return -ENODEV;
-	}
-
-	return 0;
-}
-
-/**
- * try_start_dim_transfer - try to transfer a buffer on a channel
- * @hdm_ch: channel specific data
- *
- * Transfer a buffer from pending_list if the channel is ready
- */
-static int try_start_dim_transfer(struct hdm_channel *hdm_ch)
-{
-	u16 buf_size;
-	struct list_head *head = &hdm_ch->pending_list;
-	struct mbo *mbo;
-	unsigned long flags;
-	struct dim_ch_state_t st;
-
-	BUG_ON(!hdm_ch);
-	BUG_ON(!hdm_ch->is_initialized);
-
-	spin_lock_irqsave(&dim_lock, flags);
-	if (list_empty(head)) {
-		spin_unlock_irqrestore(&dim_lock, flags);
-		return -EAGAIN;
-	}
-
-	if (!dim_get_channel_state(&hdm_ch->ch, &st)->ready) {
-		spin_unlock_irqrestore(&dim_lock, flags);
-		return -EAGAIN;
-	}
-
-	mbo = list_first_entry(head, struct mbo, list);
-	buf_size = mbo->buffer_length;
-
-	if (dim_dbr_space(&hdm_ch->ch) < buf_size) {
-		spin_unlock_irqrestore(&dim_lock, flags);
-		return -EAGAIN;
-	}
-
-	BUG_ON(mbo->bus_address == 0);
-	if (!dim_enqueue_buffer(&hdm_ch->ch, mbo->bus_address, buf_size)) {
-		list_del(head->next);
-		spin_unlock_irqrestore(&dim_lock, flags);
-		mbo->processed_length = 0;
-		mbo->status = MBO_E_INVAL;
-		mbo->complete(mbo);
-		return -EFAULT;
-	}
-
-	list_move_tail(head->next, &hdm_ch->started_list);
-	spin_unlock_irqrestore(&dim_lock, flags);
-
-	return 0;
-}
-
-/**
- * deliver_netinfo_thread - thread to deliver network status to mostcore
- * @data: private data
- *
- * Wait for network status and deliver it to mostcore once it is received
- */
-static int deliver_netinfo_thread(void *data)
-{
-	struct dim2_hdm *dev = data;
-
-	while (!kthread_should_stop()) {
-		wait_event_interruptible(dev->netinfo_waitq,
-					 dev->deliver_netinfo ||
-					 kthread_should_stop());
-
-		if (dev->deliver_netinfo) {
-			dev->deliver_netinfo--;
-			if (dev->on_netinfo) {
-				dev->on_netinfo(&dev->most_iface,
-						dev->link_state,
-						dev->mac_addrs);
-			}
-		}
-	}
-
-	return 0;
-}
-
-/**
- * retrieve_netinfo - retrieve network status from received buffer
- * @dev: private data
- * @mbo: received MBO
- *
- * Parse the message in buffer and get node address, link state, MAC address.
- * Wake up a thread to deliver this status to mostcore
- */
-static void retrieve_netinfo(struct dim2_hdm *dev, struct mbo *mbo)
-{
-	u8 *data = mbo->virt_address;
-
-	pr_info("Node Address: 0x%03x\n", (u16)data[16] << 8 | data[17]);
-	dev->link_state = data[18];
-	pr_info("NIState: %d\n", dev->link_state);
-	memcpy(dev->mac_addrs, data + 19, 6);
-	dev->deliver_netinfo++;
-	wake_up_interruptible(&dev->netinfo_waitq);
-}
-
-/**
- * service_done_flag - handle completed buffers
- * @dev: private data
- * @ch_idx: channel index
- *
- * Return back the completed buffers to mostcore, using completion callback
- */
-static void service_done_flag(struct dim2_hdm *dev, int ch_idx)
-{
-	struct hdm_channel *hdm_ch = dev->hch + ch_idx;
-	struct dim_ch_state_t st;
-	struct list_head *head;
-	struct mbo *mbo;
-	int done_buffers;
-	unsigned long flags;
-	u8 *data;
-
-	BUG_ON(!hdm_ch);
-	BUG_ON(!hdm_ch->is_initialized);
-
-	spin_lock_irqsave(&dim_lock, flags);
-
-	done_buffers = dim_get_channel_state(&hdm_ch->ch, &st)->done_buffers;
-	if (!done_buffers) {
-		spin_unlock_irqrestore(&dim_lock, flags);
-		return;
-	}
-
-	if (!dim_detach_buffers(&hdm_ch->ch, done_buffers)) {
-		spin_unlock_irqrestore(&dim_lock, flags);
-		return;
-	}
-	spin_unlock_irqrestore(&dim_lock, flags);
-
-	head = &hdm_ch->started_list;
-
-	while (done_buffers) {
-		spin_lock_irqsave(&dim_lock, flags);
-		if (list_empty(head)) {
-			spin_unlock_irqrestore(&dim_lock, flags);
-			pr_crit("hard error: started_mbo list is empty whereas DIM2 has sent buffers\n");
-			break;
-		}
-
-		mbo = list_first_entry(head, struct mbo, list);
-		list_del(head->next);
-		spin_unlock_irqrestore(&dim_lock, flags);
-
-		data = mbo->virt_address;
-
-		if (hdm_ch->data_type == MOST_CH_ASYNC &&
-		    hdm_ch->direction == MOST_CH_RX &&
-		    PACKET_IS_NET_INFO(data)) {
-			retrieve_netinfo(dev, mbo);
-
-			spin_lock_irqsave(&dim_lock, flags);
-			list_add_tail(&mbo->list, &hdm_ch->pending_list);
-			spin_unlock_irqrestore(&dim_lock, flags);
-		} else {
-			if (hdm_ch->data_type == MOST_CH_CONTROL ||
-			    hdm_ch->data_type == MOST_CH_ASYNC) {
-				u32 const data_size =
-					(u32)data[0] * 256 + data[1] + 2;
-
-				mbo->processed_length =
-					min_t(u32, data_size,
-					      mbo->buffer_length);
-			} else {
-				mbo->processed_length = mbo->buffer_length;
-			}
-			mbo->status = MBO_SUCCESS;
-			mbo->complete(mbo);
-		}
-
-		done_buffers--;
-	}
-}
-
-static struct dim_channel **get_active_channels(struct dim2_hdm *dev,
-						struct dim_channel **buffer)
-{
-	int idx = 0;
-	int ch_idx;
-
-	for (ch_idx = 0; ch_idx < DMA_CHANNELS; ch_idx++) {
-		if (dev->hch[ch_idx].is_initialized)
-			buffer[idx++] = &dev->hch[ch_idx].ch;
-	}
-	buffer[idx++] = NULL;
-
-	return buffer;
-}
-
-static irqreturn_t dim2_mlb_isr(int irq, void *_dev)
-{
-	struct dim2_hdm *dev = _dev;
-	unsigned long flags;
-
-	spin_lock_irqsave(&dim_lock, flags);
-	dim_service_mlb_int_irq();
-	spin_unlock_irqrestore(&dim_lock, flags);
-
-	if (dev->atx_idx >= 0 && dev->hch[dev->atx_idx].is_initialized)
-		while (!try_start_dim_transfer(dev->hch + dev->atx_idx))
-			continue;
-
-	return IRQ_HANDLED;
-}
-
-/**
- * dim2_tasklet_fn - tasklet function
- * @data: private data
- *
- * Service each initialized channel, if needed
- */
-static void dim2_tasklet_fn(unsigned long data)
-{
-	struct dim2_hdm *dev = (struct dim2_hdm *)data;
-	unsigned long flags;
-	int ch_idx;
-
-	for (ch_idx = 0; ch_idx < DMA_CHANNELS; ch_idx++) {
-		if (!dev->hch[ch_idx].is_initialized)
-			continue;
-
-		spin_lock_irqsave(&dim_lock, flags);
-		dim_service_channel(&dev->hch[ch_idx].ch);
-		spin_unlock_irqrestore(&dim_lock, flags);
-
-		service_done_flag(dev, ch_idx);
-		while (!try_start_dim_transfer(dev->hch + ch_idx))
-			continue;
-	}
-}
-
-/**
- * dim2_ahb_isr - interrupt service routine
- * @irq: irq number
- * @_dev: private data
- *
- * Acknowledge the interrupt and schedule a tasklet to service channels.
- * Return IRQ_HANDLED.
- */
-static irqreturn_t dim2_ahb_isr(int irq, void *_dev)
-{
-	struct dim2_hdm *dev = _dev;
-	struct dim_channel *buffer[DMA_CHANNELS + 1];
-	unsigned long flags;
-
-	spin_lock_irqsave(&dim_lock, flags);
-	dim_service_ahb_int_irq(get_active_channels(dev, buffer));
-	spin_unlock_irqrestore(&dim_lock, flags);
-
-	dim2_tasklet.data = (unsigned long)dev;
-	tasklet_schedule(&dim2_tasklet);
-	return IRQ_HANDLED;
-}
-
-/**
- * complete_all_mbos - complete MBO's in a list
- * @head: list head
- *
- * Delete all the entries in list and return back MBO's to mostcore using
- * completion call back.
- */
-static void complete_all_mbos(struct list_head *head)
-{
-	unsigned long flags;
-	struct mbo *mbo;
-
-	for (;;) {
-		spin_lock_irqsave(&dim_lock, flags);
-		if (list_empty(head)) {
-			spin_unlock_irqrestore(&dim_lock, flags);
-			break;
-		}
-
-		mbo = list_first_entry(head, struct mbo, list);
-		list_del(head->next);
-		spin_unlock_irqrestore(&dim_lock, flags);
-
-		mbo->processed_length = 0;
-		mbo->status = MBO_E_CLOSE;
-		mbo->complete(mbo);
-	}
-}
-
-/**
- * configure_channel - initialize a channel
- * @iface: interface the channel belongs to
- * @channel: channel to be configured
- * @channel_config: structure that holds the configuration information
- *
- * Receives configuration information from mostcore and initialize
- * the corresponding channel. Return 0 on success, negative on failure.
- */
-static int configure_channel(struct most_interface *most_iface, int ch_idx,
-			     struct most_channel_config *ccfg)
-{
-	struct dim2_hdm *dev = iface_to_hdm(most_iface);
-	bool const is_tx = ccfg->direction == MOST_CH_TX;
-	u16 const sub_size = ccfg->subbuffer_size;
-	u16 const buf_size = ccfg->buffer_size;
-	u16 new_size;
-	unsigned long flags;
-	u8 hal_ret;
-	int const ch_addr = ch_idx * 2 + 2;
-	struct hdm_channel *const hdm_ch = dev->hch + ch_idx;
-
-	BUG_ON(ch_idx < 0 || ch_idx >= DMA_CHANNELS);
-
-	if (hdm_ch->is_initialized)
-		return -EPERM;
-
-	switch (ccfg->data_type) {
-	case MOST_CH_CONTROL:
-		new_size = dim_norm_ctrl_async_buffer_size(buf_size);
-		if (new_size == 0) {
-			pr_err("%s: too small buffer size\n", hdm_ch->name);
-			return -EINVAL;
-		}
-		ccfg->buffer_size = new_size;
-		if (new_size != buf_size)
-			pr_warn("%s: fixed buffer size (%d -> %d)\n",
-				hdm_ch->name, buf_size, new_size);
-		spin_lock_irqsave(&dim_lock, flags);
-		hal_ret = dim_init_control(&hdm_ch->ch, is_tx, ch_addr,
-					   is_tx ? new_size * 2 : new_size);
-		break;
-	case MOST_CH_ASYNC:
-		new_size = dim_norm_ctrl_async_buffer_size(buf_size);
-		if (new_size == 0) {
-			pr_err("%s: too small buffer size\n", hdm_ch->name);
-			return -EINVAL;
-		}
-		ccfg->buffer_size = new_size;
-		if (new_size != buf_size)
-			pr_warn("%s: fixed buffer size (%d -> %d)\n",
-				hdm_ch->name, buf_size, new_size);
-		spin_lock_irqsave(&dim_lock, flags);
-		hal_ret = dim_init_async(&hdm_ch->ch, is_tx, ch_addr,
-					 is_tx ? new_size * 2 : new_size);
-		break;
-	case MOST_CH_ISOC:
-		new_size = dim_norm_isoc_buffer_size(buf_size, sub_size);
-		if (new_size == 0) {
-			pr_err("%s: invalid sub-buffer size or too small buffer size\n",
-			       hdm_ch->name);
-			return -EINVAL;
-		}
-		ccfg->buffer_size = new_size;
-		if (new_size != buf_size)
-			pr_warn("%s: fixed buffer size (%d -> %d)\n",
-				hdm_ch->name, buf_size, new_size);
-		spin_lock_irqsave(&dim_lock, flags);
-		hal_ret = dim_init_isoc(&hdm_ch->ch, is_tx, ch_addr, sub_size);
-		break;
-	case MOST_CH_SYNC:
-		new_size = dim_norm_sync_buffer_size(buf_size, sub_size);
-		if (new_size == 0) {
-			pr_err("%s: invalid sub-buffer size or too small buffer size\n",
-			       hdm_ch->name);
-			return -EINVAL;
-		}
-		ccfg->buffer_size = new_size;
-		if (new_size != buf_size)
-			pr_warn("%s: fixed buffer size (%d -> %d)\n",
-				hdm_ch->name, buf_size, new_size);
-		spin_lock_irqsave(&dim_lock, flags);
-		hal_ret = dim_init_sync(&hdm_ch->ch, is_tx, ch_addr, sub_size);
-		break;
-	default:
-		pr_err("%s: configure failed, bad channel type: %d\n",
-		       hdm_ch->name, ccfg->data_type);
-		return -EINVAL;
-	}
-
-	if (hal_ret != DIM_NO_ERROR) {
-		spin_unlock_irqrestore(&dim_lock, flags);
-		pr_err("%s: configure failed (%d), type: %d, is_tx: %d\n",
-		       hdm_ch->name, hal_ret, ccfg->data_type, (int)is_tx);
-		return -ENODEV;
-	}
-
-	hdm_ch->data_type = ccfg->data_type;
-	hdm_ch->direction = ccfg->direction;
-	hdm_ch->is_initialized = true;
-
-	if (hdm_ch->data_type == MOST_CH_ASYNC &&
-	    hdm_ch->direction == MOST_CH_TX &&
-	    dev->atx_idx < 0)
-		dev->atx_idx = ch_idx;
-
-	spin_unlock_irqrestore(&dim_lock, flags);
-
-	return 0;
-}
-
-/**
- * enqueue - enqueue a buffer for data transfer
- * @iface: intended interface
- * @channel: ID of the channel the buffer is intended for
- * @mbo: pointer to the buffer object
- *
- * Push the buffer into pending_list and try to transfer one buffer from
- * pending_list. Return 0 on success, negative on failure.
- */
-static int enqueue(struct most_interface *most_iface, int ch_idx,
-		   struct mbo *mbo)
-{
-	struct dim2_hdm *dev = iface_to_hdm(most_iface);
-	struct hdm_channel *hdm_ch = dev->hch + ch_idx;
-	unsigned long flags;
-
-	BUG_ON(ch_idx < 0 || ch_idx >= DMA_CHANNELS);
-
-	if (!hdm_ch->is_initialized)
-		return -EPERM;
-
-	if (mbo->bus_address == 0)
-		return -EFAULT;
-
-	spin_lock_irqsave(&dim_lock, flags);
-	list_add_tail(&mbo->list, &hdm_ch->pending_list);
-	spin_unlock_irqrestore(&dim_lock, flags);
-
-	(void)try_start_dim_transfer(hdm_ch);
-
-	return 0;
-}
-
-/**
- * request_netinfo - triggers retrieving of network info
- * @iface: pointer to the interface
- * @channel_id: corresponding channel ID
- *
- * Send a command to INIC which triggers retrieving of network info by means of
- * "Message exchange over MDP/MEP". Return 0 on success, negative on failure.
- */
-static void request_netinfo(struct most_interface *most_iface, int ch_idx,
-			    void (*on_netinfo)(struct most_interface *,
-					       unsigned char, unsigned char *))
-{
-	struct dim2_hdm *dev = iface_to_hdm(most_iface);
-	struct mbo *mbo;
-	u8 *data;
-
-	dev->on_netinfo = on_netinfo;
-	if (!on_netinfo)
-		return;
-
-	if (dev->atx_idx < 0) {
-		pr_err("Async Tx Not initialized\n");
-		return;
-	}
-
-	mbo = most_get_mbo(&dev->most_iface, dev->atx_idx, NULL);
-	if (!mbo)
-		return;
-
-	mbo->buffer_length = 5;
-
-	data = mbo->virt_address;
-
-	data[0] = 0x00; /* PML High byte */
-	data[1] = 0x03; /* PML Low byte */
-	data[2] = 0x02; /* PMHL */
-	data[3] = 0x08; /* FPH */
-	data[4] = 0x40; /* FMF (FIFO cmd msg - Triggers NAOverMDP) */
-
-	most_submit_mbo(mbo);
-}
-
-/**
- * poison_channel - poison buffers of a channel
- * @iface: pointer to the interface the channel to be poisoned belongs to
- * @channel_id: corresponding channel ID
- *
- * Destroy a channel and complete all the buffers in both started_list &
- * pending_list. Return 0 on success, negative on failure.
- */
-static int poison_channel(struct most_interface *most_iface, int ch_idx)
-{
-	struct dim2_hdm *dev = iface_to_hdm(most_iface);
-	struct hdm_channel *hdm_ch = dev->hch + ch_idx;
-	unsigned long flags;
-	u8 hal_ret;
-	int ret = 0;
-
-	BUG_ON(ch_idx < 0 || ch_idx >= DMA_CHANNELS);
-
-	if (!hdm_ch->is_initialized)
-		return -EPERM;
-
-	tasklet_disable(&dim2_tasklet);
-	spin_lock_irqsave(&dim_lock, flags);
-	hal_ret = dim_destroy_channel(&hdm_ch->ch);
-	hdm_ch->is_initialized = false;
-	if (ch_idx == dev->atx_idx)
-		dev->atx_idx = -1;
-	spin_unlock_irqrestore(&dim_lock, flags);
-	tasklet_enable(&dim2_tasklet);
-	if (hal_ret != DIM_NO_ERROR) {
-		pr_err("HAL Failed to close channel %s\n", hdm_ch->name);
-		ret = -EFAULT;
-	}
-
-	complete_all_mbos(&hdm_ch->started_list);
-	complete_all_mbos(&hdm_ch->pending_list);
-
-	return ret;
-}
-
-/*
- * dim2_probe - dim2 probe handler
- * @pdev: platform device structure
- *
- * Register the dim2 interface with mostcore and initialize it.
- * Return 0 on success, negative on failure.
- */
-static int dim2_probe(struct platform_device *pdev)
-{
-	struct dim2_hdm *dev;
-	struct resource *res;
-	int ret, i;
-	struct kobject *kobj;
-	int irq;
-
-	dev = devm_kzalloc(&pdev->dev, sizeof(*dev), GFP_KERNEL);
-	if (!dev)
-		return -ENOMEM;
-
-	dev->atx_idx = -1;
-
-	platform_set_drvdata(pdev, dev);
-	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	dev->io_base = devm_ioremap_resource(&pdev->dev, res);
-	if (IS_ERR(dev->io_base))
-		return PTR_ERR(dev->io_base);
-
-	irq = platform_get_irq(pdev, 0);
-	if (irq < 0) {
-		dev_err(&pdev->dev, "failed to get ahb0_int irq: %d\n", irq);
-		return irq;
-	}
-
-	ret = devm_request_irq(&pdev->dev, irq, dim2_ahb_isr, 0,
-			       "dim2_ahb0_int", dev);
-	if (ret) {
-		dev_err(&pdev->dev, "failed to request ahb0_int irq %d\n", irq);
-		return ret;
-	}
-
-	irq = platform_get_irq(pdev, 1);
-	if (irq < 0) {
-		dev_err(&pdev->dev, "failed to get mlb_int irq: %d\n", irq);
-		return irq;
-	}
-
-	ret = devm_request_irq(&pdev->dev, irq, dim2_mlb_isr, 0,
-			       "dim2_mlb_int", dev);
-	if (ret) {
-		dev_err(&pdev->dev, "failed to request mlb_int irq %d\n", irq);
-		return ret;
-	}
-
-	init_waitqueue_head(&dev->netinfo_waitq);
-	dev->deliver_netinfo = 0;
-	dev->netinfo_task = kthread_run(&deliver_netinfo_thread, (void *)dev,
-					"dim2_netinfo");
-	if (IS_ERR(dev->netinfo_task))
-		return PTR_ERR(dev->netinfo_task);
-
-	for (i = 0; i < DMA_CHANNELS; i++) {
-		struct most_channel_capability *cap = dev->capabilities + i;
-		struct hdm_channel *hdm_ch = dev->hch + i;
-
-		INIT_LIST_HEAD(&hdm_ch->pending_list);
-		INIT_LIST_HEAD(&hdm_ch->started_list);
-		hdm_ch->is_initialized = false;
-		snprintf(hdm_ch->name, sizeof(hdm_ch->name), "ca%d", i * 2 + 2);
-
-		cap->name_suffix = hdm_ch->name;
-		cap->direction = MOST_CH_RX | MOST_CH_TX;
-		cap->data_type = MOST_CH_CONTROL | MOST_CH_ASYNC |
-				 MOST_CH_ISOC | MOST_CH_SYNC;
-		cap->num_buffers_packet = MAX_BUFFERS_PACKET;
-		cap->buffer_size_packet = MAX_BUF_SIZE_PACKET;
-		cap->num_buffers_streaming = MAX_BUFFERS_STREAMING;
-		cap->buffer_size_streaming = MAX_BUF_SIZE_STREAMING;
-	}
-
-	{
-		const char *fmt;
-
-		if (sizeof(res->start) == sizeof(long long))
-			fmt = "dim2-%016llx";
-		else if (sizeof(res->start) == sizeof(long))
-			fmt = "dim2-%016lx";
-		else
-			fmt = "dim2-%016x";
-
-		snprintf(dev->name, sizeof(dev->name), fmt, res->start);
-	}
-
-	dev->most_iface.interface = ITYPE_MEDIALB_DIM2;
-	dev->most_iface.description = dev->name;
-	dev->most_iface.num_channels = DMA_CHANNELS;
-	dev->most_iface.channel_vector = dev->capabilities;
-	dev->most_iface.configure = configure_channel;
-	dev->most_iface.enqueue = enqueue;
-	dev->most_iface.poison_channel = poison_channel;
-	dev->most_iface.request_netinfo = request_netinfo;
-
-	kobj = most_register_interface(&dev->most_iface);
-	if (IS_ERR(kobj)) {
-		ret = PTR_ERR(kobj);
-		dev_err(&pdev->dev, "failed to register MOST interface\n");
-		goto err_stop_thread;
-	}
-
-	ret = dim2_sysfs_probe(&dev->bus, kobj);
-	if (ret)
-		goto err_unreg_iface;
-
-	ret = startup_dim(pdev);
-	if (ret) {
-		dev_err(&pdev->dev, "failed to initialize DIM2\n");
-		goto err_destroy_bus;
-	}
-
-	return 0;
-
-err_destroy_bus:
-	dim2_sysfs_destroy(&dev->bus);
-err_unreg_iface:
-	most_deregister_interface(&dev->most_iface);
-err_stop_thread:
-	kthread_stop(dev->netinfo_task);
-
-	return ret;
-}
-
-/**
- * dim2_remove - dim2 remove handler
- * @pdev: platform device structure
- *
- * Unregister the interface from mostcore
- */
-static int dim2_remove(struct platform_device *pdev)
-{
-	struct dim2_hdm *dev = platform_get_drvdata(pdev);
-	struct dim2_platform_data *pdata = pdev->dev.platform_data;
-	unsigned long flags;
-
-	spin_lock_irqsave(&dim_lock, flags);
-	dim_shutdown();
-	spin_unlock_irqrestore(&dim_lock, flags);
-
-	if (pdata && pdata->destroy)
-		pdata->destroy(pdata);
-
-	dim2_sysfs_destroy(&dev->bus);
-	most_deregister_interface(&dev->most_iface);
-	kthread_stop(dev->netinfo_task);
-
-	/*
-	 * break the link to the local platform_device_id struct
-	 * to prevent a crash when the platform device module is unloaded
-	 */
-	pdev->id_entry = NULL;
-
-	return 0;
-}
-
-static const struct platform_device_id dim2_id[] = {
-	{ "medialb_dim2" },
-	{ }, /* Terminating entry */
-};
-
-MODULE_DEVICE_TABLE(platform, dim2_id);
-
-static struct platform_driver dim2_driver = {
-	.probe = dim2_probe,
-	.remove = dim2_remove,
-	.id_table = dim2_id,
-	.driver = {
-		.name = "hdm_dim2",
-	},
-};
-
-module_platform_driver(dim2_driver);
-
-MODULE_AUTHOR("Jain Roy Ambi <JainRoy.Ambi@xxxxxxxxxxxxx>");
-MODULE_AUTHOR("Andrey Shvetsov <andrey.shvetsov@xxxxxx>");
-MODULE_DESCRIPTION("MediaLB DIM2 Hardware Dependent Module");
-MODULE_LICENSE("GPL");
diff --git a/drivers/staging/most/hdm-dim2/dim2_hdm.h b/drivers/staging/most/hdm-dim2/dim2_hdm.h
deleted file mode 100644
index 4050e7c..0000000
--- a/drivers/staging/most/hdm-dim2/dim2_hdm.h
+++ /dev/null
@@ -1,27 +0,0 @@
-/*
- * dim2_hdm.h - MediaLB DIM2 HDM Header
- *
- * Copyright (C) 2015, Microchip Technology Germany II GmbH & Co. KG
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * This file is licensed under GPLv2.
- */
-
-#ifndef DIM2_HDM_H
-#define	DIM2_HDM_H
-
-struct device;
-
-/* platform dependent data for dim2 interface */
-struct dim2_platform_data {
-	int (*init)(struct dim2_platform_data *pd, void __iomem *io_base,
-		    int clk_speed);
-	void (*destroy)(struct dim2_platform_data *pd);
-	void *priv;
-};
-
-#endif	/* DIM2_HDM_H */
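For context, a minimal board-file sketch of how this platform data could be supplied to the driver follows; the helper names, clock handling and resource layout are illustrative assumptions, not part of this patch:

#include <linux/platform_device.h>
#include "dim2_hdm.h"

static int board_dim2_init(struct dim2_platform_data *pd,
			   void __iomem *io_base, int clk_speed)
{
	/* enable the MediaLB clocks for the mapped DIM2 instance */
	return 0;
}

static void board_dim2_destroy(struct dim2_platform_data *pd)
{
	/* undo whatever board_dim2_init() set up */
}

static struct dim2_platform_data board_dim2_pdata = {
	.init = board_dim2_init,
	.destroy = board_dim2_destroy,
};

/*
 * Registered against the "medialb_dim2" entry of the driver's id table,
 * e.g. with platform_device_register_resndata(NULL, "medialb_dim2", -1,
 * res, ARRAY_SIZE(res), &board_dim2_pdata, sizeof(board_dim2_pdata)),
 * where res holds the register window and the two interrupt lines the
 * probe function requests.
 */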
diff --git a/drivers/staging/most/hdm-dim2/dim2_reg.h b/drivers/staging/most/hdm-dim2/dim2_reg.h
deleted file mode 100644
index f7d9fbc..0000000
--- a/drivers/staging/most/hdm-dim2/dim2_reg.h
+++ /dev/null
@@ -1,163 +0,0 @@
-/*
- * dim2_reg.h - Definitions for registers of DIM2
- * (MediaLB, Device Interface Macro IP, OS62420)
- *
- * Copyright (C) 2015, Microchip Technology Germany II GmbH & Co. KG
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * This file is licensed under GPLv2.
- */
-
-#ifndef DIM2_OS62420_H
-#define	DIM2_OS62420_H
-
-#include <linux/types.h>
-
-struct dim2_regs {
-	/* 0x00 */ u32 MLBC0;
-	/* 0x01 */ u32 rsvd0[1];
-	/* 0x02 */ u32 MLBPC0;
-	/* 0x03 */ u32 MS0;
-	/* 0x04 */ u32 rsvd1[1];
-	/* 0x05 */ u32 MS1;
-	/* 0x06 */ u32 rsvd2[2];
-	/* 0x08 */ u32 MSS;
-	/* 0x09 */ u32 MSD;
-	/* 0x0A */ u32 rsvd3[1];
-	/* 0x0B */ u32 MIEN;
-	/* 0x0C */ u32 rsvd4[1];
-	/* 0x0D */ u32 MLBPC2;
-	/* 0x0E */ u32 MLBPC1;
-	/* 0x0F */ u32 MLBC1;
-	/* 0x10 */ u32 rsvd5[0x10];
-	/* 0x20 */ u32 HCTL;
-	/* 0x21 */ u32 rsvd6[1];
-	/* 0x22 */ u32 HCMR0;
-	/* 0x23 */ u32 HCMR1;
-	/* 0x24 */ u32 HCER0;
-	/* 0x25 */ u32 HCER1;
-	/* 0x26 */ u32 HCBR0;
-	/* 0x27 */ u32 HCBR1;
-	/* 0x28 */ u32 rsvd7[8];
-	/* 0x30 */ u32 MDAT0;
-	/* 0x31 */ u32 MDAT1;
-	/* 0x32 */ u32 MDAT2;
-	/* 0x33 */ u32 MDAT3;
-	/* 0x34 */ u32 MDWE0;
-	/* 0x35 */ u32 MDWE1;
-	/* 0x36 */ u32 MDWE2;
-	/* 0x37 */ u32 MDWE3;
-	/* 0x38 */ u32 MCTL;
-	/* 0x39 */ u32 MADR;
-	/* 0x3A */ u32 rsvd8[0xB6];
-	/* 0xF0 */ u32 ACTL;
-	/* 0xF1 */ u32 rsvd9[3];
-	/* 0xF4 */ u32 ACSR0;
-	/* 0xF5 */ u32 ACSR1;
-	/* 0xF6 */ u32 ACMR0;
-	/* 0xF7 */ u32 ACMR1;
-};
-
-#define DIM2_MASK(n)  (~((~(u32)0) << (n)))
-
-enum {
-	MLBC0_MLBLK_BIT = 7,
-
-	MLBC0_MLBPEN_BIT = 5,
-
-	MLBC0_MLBCLK_SHIFT = 2,
-	MLBC0_MLBCLK_VAL_256FS = 0,
-	MLBC0_MLBCLK_VAL_512FS = 1,
-	MLBC0_MLBCLK_VAL_1024FS = 2,
-	MLBC0_MLBCLK_VAL_2048FS = 3,
-
-	MLBC0_FCNT_SHIFT = 15,
-	MLBC0_FCNT_MASK = 7,
-	MLBC0_FCNT_MAX_VAL = 6,
-
-	MLBC0_MLBEN_BIT = 0,
-
-	MIEN_CTX_BREAK_BIT = 29,
-	MIEN_CTX_PE_BIT = 28,
-	MIEN_CTX_DONE_BIT = 27,
-
-	MIEN_CRX_BREAK_BIT = 26,
-	MIEN_CRX_PE_BIT = 25,
-	MIEN_CRX_DONE_BIT = 24,
-
-	MIEN_ATX_BREAK_BIT = 22,
-	MIEN_ATX_PE_BIT = 21,
-	MIEN_ATX_DONE_BIT = 20,
-
-	MIEN_ARX_BREAK_BIT = 19,
-	MIEN_ARX_PE_BIT = 18,
-	MIEN_ARX_DONE_BIT = 17,
-
-	MIEN_SYNC_PE_BIT = 16,
-
-	MIEN_ISOC_BUFO_BIT = 1,
-	MIEN_ISOC_PE_BIT = 0,
-
-	MLBC1_NDA_SHIFT = 8,
-	MLBC1_NDA_MASK = 0xFF,
-
-	MLBC1_CLKMERR_BIT = 7,
-	MLBC1_LOCKERR_BIT = 6,
-
-	ACTL_DMA_MODE_BIT = 2,
-	ACTL_DMA_MODE_VAL_DMA_MODE_0 = 0,
-	ACTL_DMA_MODE_VAL_DMA_MODE_1 = 1,
-	ACTL_SCE_BIT = 0,
-
-	HCTL_EN_BIT = 15
-};
-
-enum {
-	CDT0_RPC_SHIFT = 16 + 11,
-	CDT0_RPC_MASK = DIM2_MASK(5),
-
-	CDT1_BS_ISOC_SHIFT = 0,
-	CDT1_BS_ISOC_MASK = DIM2_MASK(9),
-
-	CDT3_BD_SHIFT = 0,
-	CDT3_BD_MASK = DIM2_MASK(12),
-	CDT3_BD_ISOC_MASK = DIM2_MASK(13),
-	CDT3_BA_SHIFT = 16,
-
-	ADT0_CE_BIT = 15,
-	ADT0_LE_BIT = 14,
-	ADT0_PG_BIT = 13,
-
-	ADT1_RDY_BIT = 15,
-	ADT1_DNE_BIT = 14,
-	ADT1_ERR_BIT = 13,
-	ADT1_PS_BIT = 12,
-	ADT1_MEP_BIT = 11,
-	ADT1_BD_SHIFT = 0,
-	ADT1_CTRL_ASYNC_BD_MASK = DIM2_MASK(11),
-	ADT1_ISOC_SYNC_BD_MASK = DIM2_MASK(13),
-
-	CAT_FCE_BIT = 14,
-	CAT_MFE_BIT = 14,
-
-	CAT_MT_BIT = 13,
-
-	CAT_RNW_BIT = 12,
-
-	CAT_CE_BIT = 11,
-
-	CAT_CT_SHIFT = 8,
-	CAT_CT_VAL_SYNC = 0,
-	CAT_CT_VAL_CONTROL = 1,
-	CAT_CT_VAL_ASYNC = 2,
-	CAT_CT_VAL_ISOC = 3,
-
-	CAT_CL_SHIFT = 0,
-	CAT_CL_MASK = DIM2_MASK(6)
-};
-
-#endif	/* DIM2_OS62420_H */
diff --git a/drivers/staging/most/hdm-dim2/dim2_sysfs.c b/drivers/staging/most/hdm-dim2/dim2_sysfs.c
deleted file mode 100644
index d8b22f9..0000000
--- a/drivers/staging/most/hdm-dim2/dim2_sysfs.c
+++ /dev/null
@@ -1,115 +0,0 @@
-/*
- * dim2_sysfs.c - MediaLB sysfs information
- *
- * Copyright (C) 2015, Microchip Technology Germany II GmbH & Co. KG
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * This file is licensed under GPLv2.
- */
-
-/* Author: Andrey Shvetsov <andrey.shvetsov@xxxxxx> */
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/kernel.h>
-#include "dim2_sysfs.h"
-
-struct bus_attr {
-	struct attribute attr;
-	ssize_t (*show)(struct medialb_bus *bus, char *buf);
-	ssize_t (*store)(struct medialb_bus *bus, const char *buf,
-			 size_t count);
-};
-
-static ssize_t state_show(struct medialb_bus *bus, char *buf)
-{
-	bool state = dim2_sysfs_get_state_cb();
-
-	return sprintf(buf, "%s\n", state ? "locked" : "");
-}
-
-static struct bus_attr state_attr = __ATTR_RO(state);
-
-static struct attribute *bus_default_attrs[] = {
-	&state_attr.attr,
-	NULL,
-};
-
-static const struct attribute_group bus_attr_group = {
-	.attrs = bus_default_attrs,
-};
-
-static void bus_kobj_release(struct kobject *kobj)
-{
-}
-
-static ssize_t bus_kobj_attr_show(struct kobject *kobj, struct attribute *attr,
-				  char *buf)
-{
-	struct medialb_bus *bus =
-		container_of(kobj, struct medialb_bus, kobj_group);
-	struct bus_attr *xattr = container_of(attr, struct bus_attr, attr);
-
-	if (!xattr->show)
-		return -EIO;
-
-	return xattr->show(bus, buf);
-}
-
-static ssize_t bus_kobj_attr_store(struct kobject *kobj, struct attribute *attr,
-				   const char *buf, size_t count)
-{
-	struct medialb_bus *bus =
-		container_of(kobj, struct medialb_bus, kobj_group);
-	struct bus_attr *xattr = container_of(attr, struct bus_attr, attr);
-
-	if (!xattr->store)
-		return -EIO;
-
-	return xattr->store(bus, buf, count);
-}
-
-static struct sysfs_ops const bus_kobj_sysfs_ops = {
-	.show = bus_kobj_attr_show,
-	.store = bus_kobj_attr_store,
-};
-
-static struct kobj_type bus_ktype = {
-	.release = bus_kobj_release,
-	.sysfs_ops = &bus_kobj_sysfs_ops,
-};
-
-int dim2_sysfs_probe(struct medialb_bus *bus, struct kobject *parent_kobj)
-{
-	int err;
-
-	kobject_init(&bus->kobj_group, &bus_ktype);
-	err = kobject_add(&bus->kobj_group, parent_kobj, "bus");
-	if (err) {
-		pr_err("kobject_add() failed: %d\n", err);
-		goto err_kobject_add;
-	}
-
-	err = sysfs_create_group(&bus->kobj_group, &bus_attr_group);
-	if (err) {
-		pr_err("sysfs_create_group() failed: %d\n", err);
-		goto err_create_group;
-	}
-
-	return 0;
-
-err_create_group:
-	kobject_put(&bus->kobj_group);
-
-err_kobject_add:
-	return err;
-}
-
-void dim2_sysfs_destroy(struct medialb_bus *bus)
-{
-	kobject_put(&bus->kobj_group);
-}
diff --git a/drivers/staging/most/hdm-dim2/dim2_sysfs.h b/drivers/staging/most/hdm-dim2/dim2_sysfs.h
deleted file mode 100644
index b71dd02..0000000
--- a/drivers/staging/most/hdm-dim2/dim2_sysfs.h
+++ /dev/null
@@ -1,36 +0,0 @@
-/*
- * dim2_sysfs.h - MediaLB sysfs information
- *
- * Copyright (C) 2015, Microchip Technology Germany II GmbH & Co. KG
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * This file is licensed under GPLv2.
- */
-
-/* Author: Andrey Shvetsov <andrey.shvetsov@xxxxxx> */
-
-#ifndef DIM2_SYSFS_H
-#define	DIM2_SYSFS_H
-
-#include <linux/kobject.h>
-
-struct medialb_bus {
-	struct kobject kobj_group;
-};
-
-struct dim2_hdm;
-
-int dim2_sysfs_probe(struct medialb_bus *bus, struct kobject *parent_kobj);
-void dim2_sysfs_destroy(struct medialb_bus *bus);
-
-/*
- * callback,
- * must deliver MediaLB state as true if locked or false if unlocked
- */
-bool dim2_sysfs_get_state_cb(void);
-
-#endif	/* DIM2_SYSFS_H */
diff --git a/drivers/staging/most/hdm-i2c/Kconfig b/drivers/staging/most/hdm-i2c/Kconfig
deleted file mode 100644
index 6fd7983..0000000
--- a/drivers/staging/most/hdm-i2c/Kconfig
+++ /dev/null
@@ -1,12 +0,0 @@
-#
-# MOST I2C configuration
-#
-
-config HDM_I2C
-	tristate "I2C HDM"
-	depends on I2C
-	---help---
-	  Say Y here if you want to connect via I2C to a network transceiver.
-
-	  To compile this driver as a module, choose M here: the
-	  module will be called hdm_i2c.
diff --git a/drivers/staging/most/hdm-i2c/Makefile b/drivers/staging/most/hdm-i2c/Makefile
deleted file mode 100644
index 03a4a59..0000000
--- a/drivers/staging/most/hdm-i2c/Makefile
+++ /dev/null
@@ -1,3 +0,0 @@
-obj-$(CONFIG_HDM_I2C) += hdm_i2c.o
-
-ccflags-y += -Idrivers/staging/most/mostcore/
diff --git a/drivers/staging/most/hdm-i2c/hdm_i2c.c b/drivers/staging/most/hdm-i2c/hdm_i2c.c
deleted file mode 100644
index 2b4de40..0000000
--- a/drivers/staging/most/hdm-i2c/hdm_i2c.c
+++ /dev/null
@@ -1,423 +0,0 @@
-/*
- * hdm_i2c.c - Hardware Dependent Module for I2C Interface
- *
- * Copyright (C) 2013-2015, Microchip Technology Germany II GmbH & Co. KG
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * This file is licensed under GPLv2.
- */
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/slab.h>
-#include <linux/i2c.h>
-#include <linux/sched.h>
-#include <linux/interrupt.h>
-#include <linux/err.h>
-
-#include <mostcore.h>
-
-enum { CH_RX, CH_TX, NUM_CHANNELS };
-
-#define MAX_BUFFERS_CONTROL 32
-#define MAX_BUF_SIZE_CONTROL 256
-
-/**
- * list_first_mbo - get the first mbo from a list
- * @ptr:	the list head to take the mbo from.
- */
-#define list_first_mbo(ptr) \
-	list_first_entry(ptr, struct mbo, list)
-
-/* IRQ / Polling option */
-static bool polling_req;
-module_param(polling_req, bool, 0444);
-MODULE_PARM_DESC(polling_req, "Request Polling. Default = 0 (use irq)");
-
-/* Polling Rate */
-static int scan_rate = 100;
-module_param(scan_rate, int, 0644);
-MODULE_PARM_DESC(scan_rate, "Polling rate in times/sec. Default = 100");
-
-struct hdm_i2c {
-	bool is_open[NUM_CHANNELS];
-	bool polling_mode;
-	struct most_interface most_iface;
-	struct most_channel_capability capabilities[NUM_CHANNELS];
-	struct i2c_client *client;
-	struct rx {
-		struct delayed_work dwork;
-		wait_queue_head_t waitq;
-		struct list_head list;
-		struct mutex list_mutex;
-	} rx;
-	char name[64];
-};
-
-#define to_hdm(iface) container_of(iface, struct hdm_i2c, most_iface)
-
-/**
- * configure_channel - called from MOST core to configure a channel
- * @iface: interface the channel belongs to
- * @channel: channel to be configured
- * @channel_config: structure that holds the configuration information
- *
- * Return 0 on success, negative on failure.
- *
- * Receives configuration information from the MOST core and initializes
- * the corresponding channel.
- */
-static int configure_channel(struct most_interface *most_iface,
-			     int ch_idx,
-			     struct most_channel_config *channel_config)
-{
-	struct hdm_i2c *dev = to_hdm(most_iface);
-
-	BUG_ON(ch_idx < 0 || ch_idx >= NUM_CHANNELS);
-	BUG_ON(dev->is_open[ch_idx]);
-
-	if (channel_config->data_type != MOST_CH_CONTROL) {
-		pr_err("bad data type for channel %d\n", ch_idx);
-		return -EPERM;
-	}
-
-	if (channel_config->direction != dev->capabilities[ch_idx].direction) {
-		pr_err("bad direction for channel %d\n", ch_idx);
-		return -EPERM;
-	}
-
-	if ((channel_config->direction == MOST_CH_RX) && (dev->polling_mode)) {
-		schedule_delayed_work(&dev->rx.dwork,
-				      msecs_to_jiffies(MSEC_PER_SEC / 4));
-	}
-	dev->is_open[ch_idx] = true;
-
-	return 0;
-}
-
-/**
- * enqueue - called from MOST core to enqueue a buffer for data transfer
- * @iface: intended interface
- * @channel: ID of the channel the buffer is intended for
- * @mbo: pointer to the buffer object
- *
- * Return 0 on success, negative on failure.
- *
- * Transmits the data over I2C if it is a "write" request or pushes the
- * buffer into the list if it is a "read" request.
- */
-static int enqueue(struct most_interface *most_iface,
-		   int ch_idx, struct mbo *mbo)
-{
-	struct hdm_i2c *dev = to_hdm(most_iface);
-	int ret;
-
-	BUG_ON(ch_idx < 0 || ch_idx >= NUM_CHANNELS);
-	BUG_ON(!dev->is_open[ch_idx]);
-
-	if (ch_idx == CH_RX) {
-		/* RX */
-		mutex_lock(&dev->rx.list_mutex);
-		list_add_tail(&mbo->list, &dev->rx.list);
-		mutex_unlock(&dev->rx.list_mutex);
-		wake_up_interruptible(&dev->rx.waitq);
-	} else {
-		/* TX */
-		ret = i2c_master_send(dev->client, mbo->virt_address,
-				      mbo->buffer_length);
-		if (ret <= 0) {
-			mbo->processed_length = 0;
-			mbo->status = MBO_E_INVAL;
-		} else {
-			mbo->processed_length = mbo->buffer_length;
-			mbo->status = MBO_SUCCESS;
-		}
-		mbo->complete(mbo);
-	}
-
-	return 0;
-}
-
-/**
- * poison_channel - called from MOST core to poison buffers of a channel
- * @iface: pointer to the interface the channel to be poisoned belongs to
- * @channel_id: corresponding channel ID
- *
- * Return 0 on success, negative on failure.
- *
- * If the channel direction is RX, complete the buffers in the list with
- * status MBO_E_CLOSE.
- */
-static int poison_channel(struct most_interface *most_iface,
-			  int ch_idx)
-{
-	struct hdm_i2c *dev = to_hdm(most_iface);
-	struct mbo *mbo;
-
-	BUG_ON(ch_idx < 0 || ch_idx >= NUM_CHANNELS);
-	BUG_ON(!dev->is_open[ch_idx]);
-
-	dev->is_open[ch_idx] = false;
-
-	if (ch_idx == CH_RX) {
-		mutex_lock(&dev->rx.list_mutex);
-		while (!list_empty(&dev->rx.list)) {
-			mbo = list_first_mbo(&dev->rx.list);
-			list_del(&mbo->list);
-			mutex_unlock(&dev->rx.list_mutex);
-
-			mbo->processed_length = 0;
-			mbo->status = MBO_E_CLOSE;
-			mbo->complete(mbo);
-
-			mutex_lock(&dev->rx.list_mutex);
-		}
-		mutex_unlock(&dev->rx.list_mutex);
-		wake_up_interruptible(&dev->rx.waitq);
-	}
-
-	return 0;
-}
-
-static void do_rx_work(struct hdm_i2c *dev)
-{
-	struct mbo *mbo;
-	unsigned char msg[MAX_BUF_SIZE_CONTROL];
-	int ret, ch_idx = CH_RX;
-	u16 pml, data_size;
-
-	/* Read PML (2 bytes) */
-	ret = i2c_master_recv(dev->client, msg, 2);
-	if (ret <= 0) {
-		pr_err("Failed to receive PML\n");
-		return;
-	}
-
-	pml = (msg[0] << 8) | msg[1];
-	if (!pml)
-		return;
-
-	data_size = pml + 2;
-
-	/* Read the whole message, including PML */
-	ret = i2c_master_recv(dev->client, msg, data_size);
-	if (ret <= 0) {
-		pr_err("Failed to receive a Port Message\n");
-		return;
-	}
-
-	for (;;) {
-		/* Conditions to wait for: poisoned channel or free buffer
-		 * available for reading
-		 */
-		if (wait_event_interruptible(dev->rx.waitq,
-					     !dev->is_open[ch_idx] ||
-					     !list_empty(&dev->rx.list))) {
-			pr_err("wait_event_interruptible() failed\n");
-			return;
-		}
-
-		if (!dev->is_open[ch_idx])
-			return;
-
-		mutex_lock(&dev->rx.list_mutex);
-
-		/* list may be empty if poison or remove is called */
-		if (!list_empty(&dev->rx.list))
-			break;
-
-		mutex_unlock(&dev->rx.list_mutex);
-	}
-
-	mbo = list_first_mbo(&dev->rx.list);
-	list_del(&mbo->list);
-	mutex_unlock(&dev->rx.list_mutex);
-
-	mbo->processed_length = min(data_size, mbo->buffer_length);
-	memcpy(mbo->virt_address, msg, mbo->processed_length);
-	mbo->status = MBO_SUCCESS;
-	mbo->complete(mbo);
-}
-
-/**
- * pending_rx_work - Read pending messages through I2C
- * @work: definition of this work item
- *
- * Invoked by the Interrupt Service Routine, most_irq_handler()
- */
-static void pending_rx_work(struct work_struct *work)
-{
-	struct hdm_i2c *dev = container_of(work, struct hdm_i2c, rx.dwork.work);
-
-	do_rx_work(dev);
-
-	if (dev->polling_mode) {
-		if (dev->is_open[CH_RX])
-			schedule_delayed_work(&dev->rx.dwork,
-					      msecs_to_jiffies(MSEC_PER_SEC
-							       / scan_rate));
-	} else {
-		enable_irq(dev->client->irq);
-	}
-}
-
-/*
- * most_irq_handler - Interrupt Service Routine
- * @irq: irq number
- * @_dev: private data
- *
- * Schedules a delayed work
- *
- * By default the interrupt line is active low. Once the device generates an
- * interrupt, it keeps the line low until the driver clears the interrupt by
- * reading the PMP message. Since the i2c read is done in a work queue, the
- * interrupt line must be disabled temporarily to avoid the ISR being called
- * repeatedly. The interrupt is re-enabled in the work queue after the
- * message has been read.
- *
- * Note: If the interrupt line were used in falling-edge mode, interrupts
- * could be missed while the ISR is executing.
- *
- */
-static irqreturn_t most_irq_handler(int irq, void *_dev)
-{
-	struct hdm_i2c *dev = _dev;
-
-	disable_irq_nosync(irq);
-
-	schedule_delayed_work(&dev->rx.dwork, 0);
-
-	return IRQ_HANDLED;
-}
-
-/*
- * i2c_probe - i2c probe handler
- * @client: i2c client device structure
- * @id: i2c client device id
- *
- * Return 0 on success, negative on failure.
- *
- * Register the i2c client device as a MOST interface
- */
-static int i2c_probe(struct i2c_client *client, const struct i2c_device_id *id)
-{
-	struct hdm_i2c *dev;
-	int ret, i;
-	struct kobject *kobj;
-
-	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
-	if (!dev)
-		return -ENOMEM;
-
-	/* ID format: i2c-<bus>-<address> */
-	snprintf(dev->name, sizeof(dev->name), "i2c-%d-%04x",
-		 client->adapter->nr, client->addr);
-
-	for (i = 0; i < NUM_CHANNELS; i++) {
-		dev->is_open[i] = false;
-		dev->capabilities[i].data_type = MOST_CH_CONTROL;
-		dev->capabilities[i].num_buffers_packet = MAX_BUFFERS_CONTROL;
-		dev->capabilities[i].buffer_size_packet = MAX_BUF_SIZE_CONTROL;
-	}
-	dev->capabilities[CH_RX].direction = MOST_CH_RX;
-	dev->capabilities[CH_RX].name_suffix = "rx";
-	dev->capabilities[CH_TX].direction = MOST_CH_TX;
-	dev->capabilities[CH_TX].name_suffix = "tx";
-
-	dev->most_iface.interface = ITYPE_I2C;
-	dev->most_iface.description = dev->name;
-	dev->most_iface.num_channels = NUM_CHANNELS;
-	dev->most_iface.channel_vector = dev->capabilities;
-	dev->most_iface.configure = configure_channel;
-	dev->most_iface.enqueue = enqueue;
-	dev->most_iface.poison_channel = poison_channel;
-
-	INIT_LIST_HEAD(&dev->rx.list);
-	mutex_init(&dev->rx.list_mutex);
-	init_waitqueue_head(&dev->rx.waitq);
-
-	INIT_DELAYED_WORK(&dev->rx.dwork, pending_rx_work);
-
-	dev->client = client;
-	i2c_set_clientdata(client, dev);
-
-	kobj = most_register_interface(&dev->most_iface);
-	if (IS_ERR(kobj)) {
-		pr_err("Failed to register i2c as a MOST interface\n");
-		kfree(dev);
-		return PTR_ERR(kobj);
-	}
-
-	dev->polling_mode = polling_req || client->irq <= 0;
-	if (!dev->polling_mode) {
-		pr_info("Requesting IRQ: %d\n", client->irq);
-		ret = request_irq(client->irq, most_irq_handler, 0,
-				  client->name, dev);
-		if (ret) {
-			pr_info("IRQ request failed: %d, falling back to polling\n",
-				ret);
-			dev->polling_mode = true;
-		}
-	}
-
-	if (dev->polling_mode)
-		pr_info("Using polling at rate: %d times/sec\n", scan_rate);
-
-	return 0;
-}
-
-/*
- * i2c_remove - i2c remove handler
- * @client: i2c client device structure
- *
- * Return 0 on success.
- *
- * Unregister the i2c client device as a MOST interface
- */
-static int i2c_remove(struct i2c_client *client)
-{
-	struct hdm_i2c *dev = i2c_get_clientdata(client);
-	int i;
-
-	if (!dev->polling_mode)
-		free_irq(client->irq, dev);
-
-	most_deregister_interface(&dev->most_iface);
-
-	for (i = 0 ; i < NUM_CHANNELS; i++)
-		if (dev->is_open[i])
-			poison_channel(&dev->most_iface, i);
-	cancel_delayed_work_sync(&dev->rx.dwork);
-	kfree(dev);
-
-	return 0;
-}
-
-static const struct i2c_device_id i2c_id[] = {
-	{ "most_i2c", 0 },
-	{ }, /* Terminating entry */
-};
-
-MODULE_DEVICE_TABLE(i2c, i2c_id);
-
-static struct i2c_driver i2c_driver = {
-	.driver = {
-		.name = "hdm_i2c",
-	},
-	.probe = i2c_probe,
-	.remove = i2c_remove,
-	.id_table = i2c_id,
-};
-
-module_i2c_driver(i2c_driver);
-
-MODULE_AUTHOR("Jain Roy Ambi <JainRoy.Ambi@xxxxxxxxxxxxx>");
-MODULE_AUTHOR("Andrey Shvetsov <andrey.shvetsov@xxxxxx>");
-MODULE_DESCRIPTION("I2C Hardware Dependent Module");
-MODULE_LICENSE("GPL");
diff --git a/drivers/staging/most/hdm-usb/Kconfig b/drivers/staging/most/hdm-usb/Kconfig
deleted file mode 100644
index 487f1f3..0000000
--- a/drivers/staging/most/hdm-usb/Kconfig
+++ /dev/null
@@ -1,14 +0,0 @@
-#
-# MOST USB configuration
-#
-
-config HDM_USB
-	tristate "USB HDM"
-	depends on USB && NET
-
-	---help---
-	  Say Y here if you want to connect via USB to a network transceiver.
-	  This device driver depends on the networking AIM.
-
-	  To compile this driver as a module, choose M here: the
-	  module will be called hdm_usb.
diff --git a/drivers/staging/most/hdm-usb/Makefile b/drivers/staging/most/hdm-usb/Makefile
deleted file mode 100644
index 6bbacb4..0000000
--- a/drivers/staging/most/hdm-usb/Makefile
+++ /dev/null
@@ -1,4 +0,0 @@
-obj-$(CONFIG_HDM_USB) += hdm_usb.o
-
-ccflags-y += -Idrivers/staging/most/mostcore/
-ccflags-y += -Idrivers/staging/most/aim-network/
diff --git a/drivers/staging/most/hdm-usb/hdm_usb.c b/drivers/staging/most/hdm-usb/hdm_usb.c
deleted file mode 100644
index 85775da..0000000
--- a/drivers/staging/most/hdm-usb/hdm_usb.c
+++ /dev/null
@@ -1,1307 +0,0 @@
-/*
- * hdm_usb.c - Hardware dependent module for USB
- *
- * Copyright (C) 2013-2015 Microchip Technology Germany II GmbH & Co. KG
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * This file is licensed under GPLv2.
- */
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-#include <linux/module.h>
-#include <linux/fs.h>
-#include <linux/usb.h>
-#include <linux/slab.h>
-#include <linux/init.h>
-#include <linux/cdev.h>
-#include <linux/device.h>
-#include <linux/list.h>
-#include <linux/completion.h>
-#include <linux/mutex.h>
-#include <linux/spinlock.h>
-#include <linux/interrupt.h>
-#include <linux/workqueue.h>
-#include <linux/sysfs.h>
-#include <linux/dma-mapping.h>
-#include <linux/etherdevice.h>
-#include <linux/uaccess.h>
-#include "mostcore.h"
-
-#define USB_MTU			512
-#define NO_ISOCHRONOUS_URB	0
-#define AV_PACKETS_PER_XACT	2
-#define BUF_CHAIN_SIZE		0xFFFF
-#define MAX_NUM_ENDPOINTS	30
-#define MAX_SUFFIX_LEN		10
-#define MAX_STRING_LEN		80
-#define MAX_BUF_SIZE		0xFFFF
-
-#define USB_VENDOR_ID_SMSC	0x0424  /* VID: SMSC */
-#define USB_DEV_ID_BRDG		0xC001  /* PID: USB Bridge */
-#define USB_DEV_ID_OS81118	0xCF18  /* PID: USB OS81118 */
-#define USB_DEV_ID_OS81119	0xCF19  /* PID: USB OS81119 */
-#define USB_DEV_ID_OS81210	0xCF30  /* PID: USB OS81210 */
-/* DRCI Addresses */
-#define DRCI_REG_NI_STATE	0x0100
-#define DRCI_REG_PACKET_BW	0x0101
-#define DRCI_REG_NODE_ADDR	0x0102
-#define DRCI_REG_NODE_POS	0x0103
-#define DRCI_REG_MEP_FILTER	0x0140
-#define DRCI_REG_HASH_TBL0	0x0141
-#define DRCI_REG_HASH_TBL1	0x0142
-#define DRCI_REG_HASH_TBL2	0x0143
-#define DRCI_REG_HASH_TBL3	0x0144
-#define DRCI_REG_HW_ADDR_HI	0x0145
-#define DRCI_REG_HW_ADDR_MI	0x0146
-#define DRCI_REG_HW_ADDR_LO	0x0147
-#define DRCI_REG_BASE		0x1100
-#define DRCI_COMMAND		0x02
-#define DRCI_READ_REQ		0xA0
-#define DRCI_WRITE_REQ		0xA1
-
-/**
- * struct most_dci_obj - Direct Communication Interface
- * @kobj: position in sysfs
- * @usb_device: pointer to the usb device
- * @reg_addr: register address for arbitrary DCI access
- */
-struct most_dci_obj {
-	struct kobject kobj;
-	struct usb_device *usb_device;
-	u16 reg_addr;
-};
-
-#define to_dci_obj(p) container_of(p, struct most_dci_obj, kobj)
-
-struct most_dev;
-
-struct clear_hold_work {
-	struct work_struct ws;
-	struct most_dev *mdev;
-	unsigned int channel;
-	int pipe;
-};
-
-#define to_clear_hold_work(w) container_of(w, struct clear_hold_work, ws)
-
-/**
- * struct most_dev - holds all usb interface specific stuff
- * @parent: parent object in sysfs
- * @usb_device: pointer to usb device
- * @iface: hardware interface
- * @cap: channel capabilities
- * @conf: channel configuration
- * @dci: direct communication interface of hardware
- * @ep_address: endpoint address table
- * @description: device description
- * @suffix: suffix for channel name
- * @channel_lock: synchronize channel access
- * @padding_active: indicates channel uses padding
- * @is_channel_healthy: health status table of each channel
- * @busy_urbs: list of anchored items
- * @io_mutex: synchronize I/O with disconnect
- * @link_stat_timer: timer for link status reports
- * @poll_work_obj: work for polling link status
- */
-struct most_dev {
-	struct kobject *parent;
-	struct usb_device *usb_device;
-	struct most_interface iface;
-	struct most_channel_capability *cap;
-	struct most_channel_config *conf;
-	struct most_dci_obj *dci;
-	u8 *ep_address;
-	char description[MAX_STRING_LEN];
-	char suffix[MAX_NUM_ENDPOINTS][MAX_SUFFIX_LEN];
-	spinlock_t channel_lock[MAX_NUM_ENDPOINTS]; /* sync channel access */
-	bool padding_active[MAX_NUM_ENDPOINTS];
-	bool is_channel_healthy[MAX_NUM_ENDPOINTS];
-	struct clear_hold_work clear_work[MAX_NUM_ENDPOINTS];
-	struct usb_anchor *busy_urbs;
-	struct mutex io_mutex;
-	struct timer_list link_stat_timer;
-	struct work_struct poll_work_obj;
-	void (*on_netinfo)(struct most_interface *, unsigned char,
-			   unsigned char *);
-};
-
-#define to_mdev(d) container_of(d, struct most_dev, iface)
-#define to_mdev_from_work(w) container_of(w, struct most_dev, poll_work_obj)
-
-static void wq_clear_halt(struct work_struct *wq_obj);
-static void wq_netinfo(struct work_struct *wq_obj);
-
-/**
- * drci_rd_reg - read a DCI register
- * @dev: usb device
- * @reg: register address
- * @buf: buffer to store data
- *
- * This reads data from the INIC's direct register communication interface.
- */
-static inline int drci_rd_reg(struct usb_device *dev, u16 reg, u16 *buf)
-{
-	int retval;
-	__le16 *dma_buf = kzalloc(sizeof(*dma_buf), GFP_KERNEL);
-	u8 req_type = USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE;
-
-	if (!dma_buf)
-		return -ENOMEM;
-
-	retval = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0),
-				 DRCI_READ_REQ, req_type,
-				 0x0000,
-				 reg, dma_buf, sizeof(*dma_buf), 5 * HZ);
-	*buf = le16_to_cpu(*dma_buf);
-	kfree(dma_buf);
-
-	return retval;
-}
-
-/**
- * drci_wr_reg - write a DCI register
- * @dev: usb device
- * @reg: register address
- * @data: data to write
- *
- * This writes data to the INIC's direct register communication interface.
- */
-static inline int drci_wr_reg(struct usb_device *dev, u16 reg, u16 data)
-{
-	return usb_control_msg(dev,
-			       usb_sndctrlpipe(dev, 0),
-			       DRCI_WRITE_REQ,
-			       USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
-			       data,
-			       reg,
-			       NULL,
-			       0,
-			       5 * HZ);
-}
-
-static inline int start_sync_ep(struct usb_device *usb_dev, u16 ep)
-{
-	return drci_wr_reg(usb_dev, DRCI_REG_BASE + DRCI_COMMAND + ep * 16, 1);
-}
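For illustration only, a hypothetical helper (not part of the driver) showing the access pattern these DCI helpers are meant for, mirroring how wq_netinfo() and store_value() further down use them:

static int example_dci_access(struct usb_device *usb_dev)
{
	u16 node_addr;
	int err;

	/* vendor control read; negative return value means failure */
	err = drci_rd_reg(usb_dev, DRCI_REG_NODE_ADDR, &node_addr);
	if (err < 0)
		return err;

	/* vendor control write of an arbitrary, illustrative filter value */
	return drci_wr_reg(usb_dev, DRCI_REG_MEP_FILTER, 0x0001);
}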
-
-/**
- * get_stream_frame_size - calculate frame size of current configuration
- * @cfg: channel configuration
- */
-static unsigned int get_stream_frame_size(struct most_channel_config *cfg)
-{
-	unsigned int frame_size = 0;
-	unsigned int sub_size = cfg->subbuffer_size;
-
-	if (!sub_size) {
-		pr_warn("Misconfig: Subbuffer size zero.\n");
-		return frame_size;
-	}
-	switch (cfg->data_type) {
-	case MOST_CH_ISOC:
-		frame_size = AV_PACKETS_PER_XACT * sub_size;
-		break;
-	case MOST_CH_SYNC:
-		if (cfg->packets_per_xact == 0) {
-			pr_warn("Misconfig: Packets per XACT zero\n");
-			frame_size = 0;
-		} else if (cfg->packets_per_xact == 0xFF) {
-			frame_size = (USB_MTU / sub_size) * sub_size;
-		} else {
-			frame_size = cfg->packets_per_xact * sub_size;
-		}
-		break;
-	default:
-		pr_warn("Query frame size of non-streaming channel\n");
-		break;
-	}
-	return frame_size;
-}
-
-/**
- * hdm_poison_channel - mark buffers of this channel as invalid
- * @iface: pointer to the interface
- * @channel: channel ID
- *
- * This unlinks all URBs submitted to the HCD,
- * calls the associated completion function of the core and removes
- * them from the list.
- *
- * Returns 0 on success or error code otherwise.
- */
-static int hdm_poison_channel(struct most_interface *iface, int channel)
-{
-	struct most_dev *mdev = to_mdev(iface);
-	unsigned long flags;
-	spinlock_t *lock; /* temp. lock */
-
-	if (unlikely(!iface)) {
-		dev_warn(&mdev->usb_device->dev, "Poison: Bad interface.\n");
-		return -EIO;
-	}
-	if (unlikely(channel < 0 || channel >= iface->num_channels)) {
-		dev_warn(&mdev->usb_device->dev, "Channel ID out of range.\n");
-		return -ECHRNG;
-	}
-
-	lock = mdev->channel_lock + channel;
-	spin_lock_irqsave(lock, flags);
-	mdev->is_channel_healthy[channel] = false;
-	spin_unlock_irqrestore(lock, flags);
-
-	cancel_work_sync(&mdev->clear_work[channel].ws);
-
-	mutex_lock(&mdev->io_mutex);
-	usb_kill_anchored_urbs(&mdev->busy_urbs[channel]);
-	if (mdev->padding_active[channel])
-		mdev->padding_active[channel] = false;
-
-	if (mdev->conf[channel].data_type == MOST_CH_ASYNC) {
-		del_timer_sync(&mdev->link_stat_timer);
-		cancel_work_sync(&mdev->poll_work_obj);
-	}
-	mutex_unlock(&mdev->io_mutex);
-	return 0;
-}
-
-/**
- * hdm_add_padding - add padding bytes
- * @mdev: most device
- * @channel: channel ID
- * @mbo: buffer object
- *
- * This inserts the INIC hardware-specific padding bytes into a streaming
- * channel's buffer.
- */
-static int hdm_add_padding(struct most_dev *mdev, int channel, struct mbo *mbo)
-{
-	struct most_channel_config *conf = &mdev->conf[channel];
-	unsigned int frame_size = get_stream_frame_size(conf);
-	unsigned int j, num_frames;
-
-	if (!frame_size)
-		return -EIO;
-	num_frames = mbo->buffer_length / frame_size;
-
-	if (num_frames < 1) {
-		dev_err(&mdev->usb_device->dev,
-			"Missed minimal transfer unit.\n");
-		return -EIO;
-	}
-
-	for (j = num_frames - 1; j > 0; j--)
-		memmove(mbo->virt_address + j * USB_MTU,
-			mbo->virt_address + j * frame_size,
-			frame_size);
-	mbo->buffer_length = num_frames * USB_MTU;
-	return 0;
-}
-
-/**
- * hdm_remove_padding - remove padding bytes
- * @mdev: most device
- * @channel: channel ID
- * @mbo: buffer object
- *
- * This takes the INIC hardware-specific padding bytes off a streaming
- * channel's buffer.
- */
-static int hdm_remove_padding(struct most_dev *mdev, int channel,
-			      struct mbo *mbo)
-{
-	struct most_channel_config *const conf = &mdev->conf[channel];
-	unsigned int frame_size = get_stream_frame_size(conf);
-	unsigned int j, num_frames;
-
-	if (!frame_size)
-		return -EIO;
-	num_frames = mbo->processed_length / USB_MTU;
-
-	for (j = 1; j < num_frames; j++)
-		memmove(mbo->virt_address + frame_size * j,
-			mbo->virt_address + USB_MTU * j,
-			frame_size);
-
-	mbo->processed_length = frame_size * num_frames;
-	return 0;
-}
-
-/**
- * hdm_write_completion - completion function for submitted Tx URBs
- * @urb: the URB that has been completed
- *
- * This checks the status of the completed URB. In case the URB has been
- * unlinked before, it is immediately freed. On any other error the MBO
- * transfer flag is set. On success it frees allocated resources and calls
- * the completion function.
- *
- * Context: interrupt!
- */
-static void hdm_write_completion(struct urb *urb)
-{
-	struct mbo *mbo = urb->context;
-	struct most_dev *mdev = to_mdev(mbo->ifp);
-	unsigned int channel = mbo->hdm_channel_id;
-	struct device *dev = &mdev->usb_device->dev;
-	spinlock_t *lock = mdev->channel_lock + channel;
-	unsigned long flags;
-
-	spin_lock_irqsave(lock, flags);
-
-	mbo->processed_length = 0;
-	mbo->status = MBO_E_INVAL;
-	if (likely(mdev->is_channel_healthy[channel])) {
-		switch (urb->status) {
-		case 0:
-		case -ESHUTDOWN:
-			mbo->processed_length = urb->actual_length;
-			mbo->status = MBO_SUCCESS;
-			break;
-		case -EPIPE:
-			dev_warn(dev, "Broken OUT pipe detected\n");
-			mdev->is_channel_healthy[channel] = false;
-			mdev->clear_work[channel].pipe = urb->pipe;
-			schedule_work(&mdev->clear_work[channel].ws);
-			break;
-		case -ENODEV:
-		case -EPROTO:
-			mbo->status = MBO_E_CLOSE;
-			break;
-		}
-	}
-
-	spin_unlock_irqrestore(lock, flags);
-
-	if (likely(mbo->complete))
-		mbo->complete(mbo);
-	usb_free_urb(urb);
-}
-
-/**
- * hdm_read_completion - completion function for submitted Rx URBs
- * @urb: the URB that has been completed
- *
- * This checks the status of the completed URB. In case the URB has been
- * unlinked before, it is immediately freed. On any other error the MBO
- * transfer flag is set. On success it frees allocated resources, removes
- * padding bytes (if necessary) and calls the completion function.
- *
- * Context: interrupt!
- *
- * **************************************************************************
- *                   Error codes returned by in urb->status
- *                   or in iso_frame_desc[n].status (for ISO)
- * *************************************************************************
- *
- * USB device drivers may only test urb status values in completion handlers.
- * This is because otherwise there would be a race between HCDs updating
- * these values on one CPU, and device drivers testing them on another CPU.
- *
- * A transfer's actual_length may be positive even when an error has been
- * reported.  That's because transfers often involve several packets, so that
- * one or more packets could finish before an error stops further endpoint I/O.
- *
- * For isochronous URBs, the urb status value is non-zero only if the URB is
- * unlinked, the device is removed, the host controller is disabled or the total
- * transferred length is less than the requested length and the URB_SHORT_NOT_OK
- * flag is set.  Completion handlers for isochronous URBs should only see
- * urb->status set to zero, -ENOENT, -ECONNRESET, -ESHUTDOWN, or -EREMOTEIO.
- * Individual frame descriptor status fields may report more status codes.
- *
- *
- * 0			Transfer completed successfully
- *
- * -ENOENT		URB was synchronously unlinked by usb_unlink_urb
- *
- * -EINPROGRESS		URB still pending, no results yet
- *			(That is, if drivers see this it's a bug.)
- *
- * -EPROTO (*, **)	a) bitstuff error
- *			b) no response packet received within the
- *			   prescribed bus turn-around time
- *			c) unknown USB error
- *
- * -EILSEQ (*, **)	a) CRC mismatch
- *			b) no response packet received within the
- *			   prescribed bus turn-around time
- *			c) unknown USB error
- *
- *			Note that often the controller hardware does not
- *			distinguish among cases a), b), and c), so a
- *			driver cannot tell whether there was a protocol
- *			error, a failure to respond (often caused by
- *			device disconnect), or some other fault.
- *
- * -ETIME (**)		No response packet received within the prescribed
- *			bus turn-around time.  This error may instead be
- *			reported as -EPROTO or -EILSEQ.
- *
- * -ETIMEDOUT		Synchronous USB message functions use this code
- *			to indicate timeout expired before the transfer
- *			completed, and no other error was reported by HC.
- *
- * -EPIPE (**)		Endpoint stalled.  For non-control endpoints,
- *			reset this status with usb_clear_halt().
- *
- * -ECOMM		During an IN transfer, the host controller
- *			received data from an endpoint faster than it
- *			could be written to system memory
- *
- * -ENOSR		During an OUT transfer, the host controller
- *			could not retrieve data from system memory fast
- *			enough to keep up with the USB data rate
- *
- * -EOVERFLOW (*)	The amount of data returned by the endpoint was
- *			greater than either the max packet size of the
- *			endpoint or the remaining buffer size.  "Babble".
- *
- * -EREMOTEIO		The data read from the endpoint did not fill the
- *			specified buffer, and URB_SHORT_NOT_OK was set in
- *			urb->transfer_flags.
- *
- * -ENODEV		Device was removed.  Often preceded by a burst of
- *			other errors, since the hub driver doesn't detect
- *			device removal events immediately.
- *
- * -EXDEV		ISO transfer only partially completed
- *			(only set in iso_frame_desc[n].status, not urb->status)
- *
- * -EINVAL		ISO madness, if this happens: Log off and go home
- *
- * -ECONNRESET		URB was asynchronously unlinked by usb_unlink_urb
- *
- * -ESHUTDOWN		The device or host controller has been disabled due
- *			to some problem that could not be worked around,
- *			such as a physical disconnect.
- *
- *
- * (*) Error codes like -EPROTO, -EILSEQ and -EOVERFLOW normally indicate
- * hardware problems such as bad devices (including firmware) or cables.
- *
- * (**) This is also one of several codes that different kinds of host
- * controller use to indicate a transfer has failed because of device
- * disconnect.  In the interval before the hub driver starts disconnect
- * processing, devices may receive such fault reports for every request.
- *
- * See <https://www.kernel.org/doc/Documentation/driver-api/usb/error-codes.rst>
- */
-static void hdm_read_completion(struct urb *urb)
-{
-	struct mbo *mbo = urb->context;
-	struct most_dev *mdev = to_mdev(mbo->ifp);
-	unsigned int channel = mbo->hdm_channel_id;
-	struct device *dev = &mdev->usb_device->dev;
-	spinlock_t *lock = mdev->channel_lock + channel;
-	unsigned long flags;
-
-	spin_lock_irqsave(lock, flags);
-
-	mbo->processed_length = 0;
-	mbo->status = MBO_E_INVAL;
-	if (likely(mdev->is_channel_healthy[channel])) {
-		switch (urb->status) {
-		case 0:
-		case -ESHUTDOWN:
-			mbo->processed_length = urb->actual_length;
-			mbo->status = MBO_SUCCESS;
-			if (mdev->padding_active[channel] &&
-			    hdm_remove_padding(mdev, channel, mbo)) {
-				mbo->processed_length = 0;
-				mbo->status = MBO_E_INVAL;
-			}
-			break;
-		case -EPIPE:
-			dev_warn(dev, "Broken IN pipe detected\n");
-			mdev->is_channel_healthy[channel] = false;
-			mdev->clear_work[channel].pipe = urb->pipe;
-			schedule_work(&mdev->clear_work[channel].ws);
-			break;
-		case -ENODEV:
-		case -EPROTO:
-			mbo->status = MBO_E_CLOSE;
-			break;
-		case -EOVERFLOW:
-			dev_warn(dev, "Babble on IN pipe detected\n");
-			break;
-		}
-	}
-
-	spin_unlock_irqrestore(lock, flags);
-
-	if (likely(mbo->complete))
-		mbo->complete(mbo);
-	usb_free_urb(urb);
-}
-
-/**
- * hdm_enqueue - receive a buffer to be used for data transfer
- * @iface: interface to enqueue to
- * @channel: ID of the channel
- * @mbo: pointer to the buffer object
- *
- * This allocates a new URB and fills it according to the channel
- * that is being used for transmission of data. Before the URB is
- * submitted it is stored in the private anchor list.
- *
- * Returns 0 on success. On any error the URB is freed and an error code
- * is returned.
- *
- * Context: Could in _some_ cases be interrupt!
- */
-static int hdm_enqueue(struct most_interface *iface, int channel,
-		       struct mbo *mbo)
-{
-	struct most_dev *mdev;
-	struct most_channel_config *conf;
-	struct device *dev;
-	int retval = 0;
-	struct urb *urb;
-	unsigned long length;
-	void *virt_address;
-
-	if (unlikely(!iface || !mbo))
-		return -EIO;
-	if (unlikely(iface->num_channels <= channel || channel < 0))
-		return -ECHRNG;
-
-	mdev = to_mdev(iface);
-	conf = &mdev->conf[channel];
-	dev = &mdev->usb_device->dev;
-
-	if (!mdev->usb_device)
-		return -ENODEV;
-
-	urb = usb_alloc_urb(NO_ISOCHRONOUS_URB, GFP_ATOMIC);
-	if (!urb)
-		return -ENOMEM;
-
-	if ((conf->direction & MOST_CH_TX) && mdev->padding_active[channel] &&
-	    hdm_add_padding(mdev, channel, mbo)) {
-		retval = -EIO;
-		goto _error;
-	}
-
-	urb->transfer_dma = mbo->bus_address;
-	virt_address = mbo->virt_address;
-	length = mbo->buffer_length;
-
-	if (conf->direction & MOST_CH_TX) {
-		usb_fill_bulk_urb(urb, mdev->usb_device,
-				  usb_sndbulkpipe(mdev->usb_device,
-						  mdev->ep_address[channel]),
-				  virt_address,
-				  length,
-				  hdm_write_completion,
-				  mbo);
-		if (conf->data_type != MOST_CH_ISOC)
-			urb->transfer_flags |= URB_ZERO_PACKET;
-	} else {
-		usb_fill_bulk_urb(urb, mdev->usb_device,
-				  usb_rcvbulkpipe(mdev->usb_device,
-						  mdev->ep_address[channel]),
-				  virt_address,
-				  length + conf->extra_len,
-				  hdm_read_completion,
-				  mbo);
-	}
-	urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
-
-	usb_anchor_urb(urb, &mdev->busy_urbs[channel]);
-
-	retval = usb_submit_urb(urb, GFP_KERNEL);
-	if (retval) {
-		dev_err(dev, "URB submit failed with error %d.\n", retval);
-		goto _error_1;
-	}
-	return 0;
-
-_error_1:
-	usb_unanchor_urb(urb);
-_error:
-	usb_free_urb(urb);
-	return retval;
-}
-
-/**
- * hdm_configure_channel - receive channel configuration from core
- * @iface: interface
- * @channel: channel ID
- * @conf: structure that holds the configuration information
- *
- * The attached network interface controller (NIC) supports a padding mode
- * to avoid short packets on USB, hence increasing the performance due to a
- * lower interrupt load. This mode is the default for synchronous data and can
- * be switched on for isochronous data. In case padding is active the
- * driver needs to know the frame size of the payload in order to calculate
- * the number of bytes it needs to pad when transmitting or to cut off when
- * receiving data.
- *
- */
-static int hdm_configure_channel(struct most_interface *iface, int channel,
-				 struct most_channel_config *conf)
-{
-	unsigned int num_frames;
-	unsigned int frame_size;
-	struct most_dev *mdev = to_mdev(iface);
-	struct device *dev = &mdev->usb_device->dev;
-
-	mdev->is_channel_healthy[channel] = true;
-	mdev->clear_work[channel].channel = channel;
-	mdev->clear_work[channel].mdev = mdev;
-	INIT_WORK(&mdev->clear_work[channel].ws, wq_clear_halt);
-
-	if (unlikely(!iface || !conf)) {
-		dev_err(dev, "Bad interface or config pointer.\n");
-		return -EINVAL;
-	}
-	if (unlikely(channel < 0 || channel >= iface->num_channels)) {
-		dev_err(dev, "Channel ID out of range.\n");
-		return -EINVAL;
-	}
-	if (!conf->num_buffers || !conf->buffer_size) {
-		dev_err(dev, "Misconfig: buffer size or #buffers zero.\n");
-		return -EINVAL;
-	}
-
-	if (conf->data_type != MOST_CH_SYNC &&
-	    !(conf->data_type == MOST_CH_ISOC &&
-	      conf->packets_per_xact != 0xFF)) {
-		mdev->padding_active[channel] = false;
-		/*
-		 * Since the NIC's padding mode is not going to be
-		 * used, we can skip the frame size calculations and
-		 * move directly on to exit.
-		 */
-		goto exit;
-	}
-
-	mdev->padding_active[channel] = true;
-
-	frame_size = get_stream_frame_size(conf);
-	if (frame_size == 0 || frame_size > USB_MTU) {
-		dev_warn(dev, "Misconfig: frame size wrong\n");
-		return -EINVAL;
-	}
-
-	num_frames = conf->buffer_size / frame_size;
-
-	if (conf->buffer_size % frame_size) {
-		u16 old_size = conf->buffer_size;
-
-		conf->buffer_size = num_frames * frame_size;
-		dev_warn(dev, "%s: fixed buffer size (%d -> %d)\n",
-			 mdev->suffix[channel], old_size, conf->buffer_size);
-	}
-
-	/* calculate extra length to comply w/ HW padding */
-	conf->extra_len = num_frames * (USB_MTU - frame_size);
-
-exit:
-	mdev->conf[channel] = *conf;
-	if (conf->data_type == MOST_CH_ASYNC) {
-		u16 ep = mdev->ep_address[channel];
-
-		if (start_sync_ep(mdev->usb_device, ep) < 0)
-			dev_warn(dev, "sync for ep%02x failed", ep);
-	}
-	return 0;
-}
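To make the padding arithmetic above concrete (the channel parameters are chosen purely for illustration): a synchronous channel with subbuffer_size = 4 and packets_per_xact = 32 gives frame_size = 32 * 4 = 128 bytes. A requested buffer_size of 1000 bytes is not a multiple of 128, so it is rounded down to num_frames * frame_size = 7 * 128 = 896 bytes, and extra_len becomes 7 * (512 - 128) = 2688 bytes, i.e. exactly the headroom hdm_add_padding() needs to expand each of the 7 frames to a full USB_MTU of 512 bytes.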
-
-/**
- * hdm_request_netinfo - request network information
- * @iface: pointer to interface
- * @channel: channel ID
- *
- * This is used as trigger to set up the link status timer that
- * polls for the NI state of the INIC every 2 seconds.
- *
- */
-static void hdm_request_netinfo(struct most_interface *iface, int channel,
-				void (*on_netinfo)(struct most_interface *,
-						   unsigned char,
-						   unsigned char *))
-{
-	struct most_dev *mdev;
-
-	BUG_ON(!iface);
-	mdev = to_mdev(iface);
-	mdev->on_netinfo = on_netinfo;
-	if (!on_netinfo)
-		return;
-
-	mdev->link_stat_timer.expires = jiffies + HZ;
-	mod_timer(&mdev->link_stat_timer, mdev->link_stat_timer.expires);
-}
-
-/**
- * link_stat_timer_handler - schedule work to obtain the MAC address and link status
- * @data: pointer to USB device instance
- *
- * The handler runs in interrupt context. That's why we need to defer the
- * tasks to a work queue.
- */
-static void link_stat_timer_handler(unsigned long data)
-{
-	struct most_dev *mdev = (struct most_dev *)data;
-
-	schedule_work(&mdev->poll_work_obj);
-	mdev->link_stat_timer.expires = jiffies + (2 * HZ);
-	add_timer(&mdev->link_stat_timer);
-}
-
-/**
- * wq_netinfo - work queue function to deliver latest networking information
- * @wq_obj: object that holds data for our deferred work to do
- *
- * This retrieves the network interface status of the USB INIC
- */
-static void wq_netinfo(struct work_struct *wq_obj)
-{
-	struct most_dev *mdev = to_mdev_from_work(wq_obj);
-	struct usb_device *usb_device = mdev->usb_device;
-	struct device *dev = &usb_device->dev;
-	u16 hi, mi, lo, link;
-	u8 hw_addr[6];
-
-	if (drci_rd_reg(usb_device, DRCI_REG_HW_ADDR_HI, &hi) < 0) {
-		dev_err(dev, "Vendor request 'hw_addr_hi' failed\n");
-		return;
-	}
-
-	if (drci_rd_reg(usb_device, DRCI_REG_HW_ADDR_MI, &mi) < 0) {
-		dev_err(dev, "Vendor request 'hw_addr_mid' failed\n");
-		return;
-	}
-
-	if (drci_rd_reg(usb_device, DRCI_REG_HW_ADDR_LO, &lo) < 0) {
-		dev_err(dev, "Vendor request 'hw_addr_low' failed\n");
-		return;
-	}
-
-	if (drci_rd_reg(usb_device, DRCI_REG_NI_STATE, &link) < 0) {
-		dev_err(dev, "Vendor request 'link status' failed\n");
-		return;
-	}
-
-	hw_addr[0] = hi >> 8;
-	hw_addr[1] = hi;
-	hw_addr[2] = mi >> 8;
-	hw_addr[3] = mi;
-	hw_addr[4] = lo >> 8;
-	hw_addr[5] = lo;
-
-	if (mdev->on_netinfo)
-		mdev->on_netinfo(&mdev->iface, link, hw_addr);
-}
-
-/**
- * wq_clear_halt - work queue function
- * @wq_obj: work_struct object to execute
- *
- * This sends a clear_halt to the given USB pipe.
- */
-static void wq_clear_halt(struct work_struct *wq_obj)
-{
-	struct clear_hold_work *clear_work = to_clear_hold_work(wq_obj);
-	struct most_dev *mdev = clear_work->mdev;
-	unsigned int channel = clear_work->channel;
-	int pipe = clear_work->pipe;
-
-	mutex_lock(&mdev->io_mutex);
-	most_stop_enqueue(&mdev->iface, channel);
-	usb_kill_anchored_urbs(&mdev->busy_urbs[channel]);
-	if (usb_clear_halt(mdev->usb_device, pipe))
-		dev_warn(&mdev->usb_device->dev, "Failed to reset endpoint.\n");
-
-	mdev->is_channel_healthy[channel] = true;
-	most_resume_enqueue(&mdev->iface, channel);
-	mutex_unlock(&mdev->io_mutex);
-}
-
-/**
- * hdm_usb_fops - file operation table for USB driver
- */
-static const struct file_operations hdm_usb_fops = {
-	.owner = THIS_MODULE,
-};
-
-/**
- * usb_device_id - ID table for HCD device probing
- */
-static const struct usb_device_id usbid[] = {
-	{ USB_DEVICE(USB_VENDOR_ID_SMSC, USB_DEV_ID_BRDG), },
-	{ USB_DEVICE(USB_VENDOR_ID_SMSC, USB_DEV_ID_OS81118), },
-	{ USB_DEVICE(USB_VENDOR_ID_SMSC, USB_DEV_ID_OS81119), },
-	{ USB_DEVICE(USB_VENDOR_ID_SMSC, USB_DEV_ID_OS81210), },
-	{ } /* Terminating entry */
-};
-
-#define MOST_DCI_RO_ATTR(_name) \
-	struct most_dci_attribute most_dci_attr_##_name = \
-		__ATTR(_name, 0444, show_value, NULL)
-
-#define MOST_DCI_ATTR(_name) \
-	struct most_dci_attribute most_dci_attr_##_name = \
-		__ATTR(_name, 0644, show_value, store_value)
-
-#define MOST_DCI_WO_ATTR(_name) \
-	struct most_dci_attribute most_dci_attr_##_name = \
-		__ATTR(_name, 0200, NULL, store_value)
-
-/**
- * struct most_dci_attribute - to access the attributes of a dci object
- * @attr: attributes of a dci object
- * @show: pointer to the show function
- * @store: pointer to the store function
- */
-struct most_dci_attribute {
-	struct attribute attr;
-	ssize_t (*show)(struct most_dci_obj *d,
-			struct most_dci_attribute *attr,
-			char *buf);
-	ssize_t (*store)(struct most_dci_obj *d,
-			 struct most_dci_attribute *attr,
-			 const char *buf,
-			 size_t count);
-};
-
-#define to_dci_attr(a) container_of(a, struct most_dci_attribute, attr)
-
-/**
- * dci_attr_show - show function for dci object
- * @kobj: pointer to kobject
- * @attr: pointer to attribute struct
- * @buf: buffer
- */
-static ssize_t dci_attr_show(struct kobject *kobj, struct attribute *attr,
-			     char *buf)
-{
-	struct most_dci_attribute *dci_attr = to_dci_attr(attr);
-	struct most_dci_obj *dci_obj = to_dci_obj(kobj);
-
-	if (!dci_attr->show)
-		return -EIO;
-
-	return dci_attr->show(dci_obj, dci_attr, buf);
-}
-
-/**
- * dci_attr_store - store function for dci object
- * @kobj: pointer to kobject
- * @attr: pointer to attribute struct
- * @buf: buffer
- * @len: length of buffer
- */
-static ssize_t dci_attr_store(struct kobject *kobj,
-			      struct attribute *attr,
-			      const char *buf,
-			      size_t len)
-{
-	struct most_dci_attribute *dci_attr = to_dci_attr(attr);
-	struct most_dci_obj *dci_obj = to_dci_obj(kobj);
-
-	if (!dci_attr->store)
-		return -EIO;
-
-	return dci_attr->store(dci_obj, dci_attr, buf, len);
-}
-
-static const struct sysfs_ops most_dci_sysfs_ops = {
-	.show = dci_attr_show,
-	.store = dci_attr_store,
-};
-
-/**
- * most_dci_release - release function for dci object
- * @kobj: pointer to kobject
- *
- * This frees the memory allocated for the dci object
- */
-static void most_dci_release(struct kobject *kobj)
-{
-	struct most_dci_obj *dci_obj = to_dci_obj(kobj);
-
-	kfree(dci_obj);
-}
-
-struct regs {
-	const char *name;
-	u16 reg;
-};
-
-static const struct regs ro_regs[] = {
-	{ "ni_state", DRCI_REG_NI_STATE },
-	{ "packet_bandwidth", DRCI_REG_PACKET_BW },
-	{ "node_address", DRCI_REG_NODE_ADDR },
-	{ "node_position", DRCI_REG_NODE_POS },
-};
-
-static const struct regs rw_regs[] = {
-	{ "mep_filter", DRCI_REG_MEP_FILTER },
-	{ "mep_hash0", DRCI_REG_HASH_TBL0 },
-	{ "mep_hash1", DRCI_REG_HASH_TBL1 },
-	{ "mep_hash2", DRCI_REG_HASH_TBL2 },
-	{ "mep_hash3", DRCI_REG_HASH_TBL3 },
-	{ "mep_eui48_hi", DRCI_REG_HW_ADDR_HI },
-	{ "mep_eui48_mi", DRCI_REG_HW_ADDR_MI },
-	{ "mep_eui48_lo", DRCI_REG_HW_ADDR_LO },
-};
-
-static int get_stat_reg_addr(const struct regs *regs, int size,
-			     const char *name, u16 *reg_addr)
-{
-	int i;
-
-	for (i = 0; i < size; i++) {
-		if (!strcmp(name, regs[i].name)) {
-			*reg_addr = regs[i].reg;
-			return 0;
-		}
-	}
-	return -EFAULT;
-}
-
-#define get_static_reg_addr(regs, name, reg_addr) \
-	get_stat_reg_addr(regs, ARRAY_SIZE(regs), name, reg_addr)
-
-static ssize_t show_value(struct most_dci_obj *dci_obj,
-			  struct most_dci_attribute *attr, char *buf)
-{
-	const char *name = attr->attr.name;
-	u16 val;
-	u16 reg_addr;
-	int err;
-
-	if (!strcmp(name, "arb_address"))
-		return snprintf(buf, PAGE_SIZE, "%04x\n", dci_obj->reg_addr);
-
-	if (!strcmp(name, "arb_value"))
-		reg_addr = dci_obj->reg_addr;
-	else if (get_static_reg_addr(ro_regs, name, &reg_addr) &&
-		 get_static_reg_addr(rw_regs, name, &reg_addr))
-		return -EFAULT;
-
-	err = drci_rd_reg(dci_obj->usb_device, reg_addr, &val);
-	if (err < 0)
-		return err;
-
-	return snprintf(buf, PAGE_SIZE, "%04x\n", val);
-}
-
-static ssize_t store_value(struct most_dci_obj *dci_obj,
-			   struct most_dci_attribute *attr,
-			   const char *buf, size_t count)
-{
-	u16 val;
-	u16 reg_addr;
-	const char *name = attr->attr.name;
-	struct usb_device *usb_dev = dci_obj->usb_device;
-	int err = kstrtou16(buf, 16, &val);
-
-	if (err)
-		return err;
-
-	if (!strcmp(name, "arb_address")) {
-		dci_obj->reg_addr = val;
-		return count;
-	}
-
-	if (!strcmp(name, "arb_value"))
-		err = drci_wr_reg(usb_dev, dci_obj->reg_addr, val);
-	else if (!strcmp(name, "sync_ep"))
-		err = start_sync_ep(usb_dev, val);
-	else if (!get_static_reg_addr(rw_regs, name, &reg_addr))
-		err = drci_wr_reg(usb_dev, reg_addr, val);
-	else
-		return -EFAULT;
-
-	if (err < 0)
-		return err;
-
-	return count;
-}
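A usage note on the two "arb" attributes implemented above: writing a hexadecimal register address to arb_address only latches it in dci_obj->reg_addr; a subsequent read or write of arb_value then performs the actual DCI access to that register. For example, writing 0100 to arb_address and reading arb_value returns the NI state (DRCI_REG_NI_STATE is 0x0100), the same value exposed by the dedicated ni_state attribute.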
-
-static MOST_DCI_RO_ATTR(ni_state);
-static MOST_DCI_RO_ATTR(packet_bandwidth);
-static MOST_DCI_RO_ATTR(node_address);
-static MOST_DCI_RO_ATTR(node_position);
-static MOST_DCI_WO_ATTR(sync_ep);
-static MOST_DCI_ATTR(mep_filter);
-static MOST_DCI_ATTR(mep_hash0);
-static MOST_DCI_ATTR(mep_hash1);
-static MOST_DCI_ATTR(mep_hash2);
-static MOST_DCI_ATTR(mep_hash3);
-static MOST_DCI_ATTR(mep_eui48_hi);
-static MOST_DCI_ATTR(mep_eui48_mi);
-static MOST_DCI_ATTR(mep_eui48_lo);
-static MOST_DCI_ATTR(arb_address);
-static MOST_DCI_ATTR(arb_value);
-
-/**
- * most_dci_def_attrs - array of default attribute files of the dci object
- */
-static struct attribute *most_dci_def_attrs[] = {
-	&most_dci_attr_ni_state.attr,
-	&most_dci_attr_packet_bandwidth.attr,
-	&most_dci_attr_node_address.attr,
-	&most_dci_attr_node_position.attr,
-	&most_dci_attr_sync_ep.attr,
-	&most_dci_attr_mep_filter.attr,
-	&most_dci_attr_mep_hash0.attr,
-	&most_dci_attr_mep_hash1.attr,
-	&most_dci_attr_mep_hash2.attr,
-	&most_dci_attr_mep_hash3.attr,
-	&most_dci_attr_mep_eui48_hi.attr,
-	&most_dci_attr_mep_eui48_mi.attr,
-	&most_dci_attr_mep_eui48_lo.attr,
-	&most_dci_attr_arb_address.attr,
-	&most_dci_attr_arb_value.attr,
-	NULL,
-};
-
-/**
- * DCI ktype
- */
-static struct kobj_type most_dci_ktype = {
-	.sysfs_ops = &most_dci_sysfs_ops,
-	.release = most_dci_release,
-	.default_attrs = most_dci_def_attrs,
-};
-
-/**
- * create_most_dci_obj - allocates a dci object
- * @parent: parent kobject
- *
- * This creates a dci object and registers it with sysfs.
- * Returns a pointer to the object or NULL when something went wrong.
- */
-static struct
-most_dci_obj *create_most_dci_obj(struct kobject *parent)
-{
-	struct most_dci_obj *most_dci = kzalloc(sizeof(*most_dci), GFP_KERNEL);
-	int retval;
-
-	if (!most_dci)
-		return NULL;
-
-	retval = kobject_init_and_add(&most_dci->kobj, &most_dci_ktype, parent,
-				      "dci");
-	if (retval) {
-		kobject_put(&most_dci->kobj);
-		return NULL;
-	}
-	return most_dci;
-}
-
-/**
- * destroy_most_dci_obj - DCI object release function
- * @p: pointer to dci object
- */
-static void destroy_most_dci_obj(struct most_dci_obj *p)
-{
-	kobject_put(&p->kobj);
-}
-
-/**
- * hdm_probe - probe function of USB device driver
- * @interface: Interface of the attached USB device
- * @id: Pointer to the USB ID table.
- *
- * This allocates and initializes the device instance, adds the new
- * entry to the internal list, scans the USB descriptors and registers
- * the interface with the core.
- * Additionally, the DCI objects are created and the hardware is sync'd.
- *
- * Return 0 on success. In case of an error a negative number is returned.
- */
-static int
-hdm_probe(struct usb_interface *interface, const struct usb_device_id *id)
-{
-	struct usb_host_interface *usb_iface_desc = interface->cur_altsetting;
-	struct usb_device *usb_dev = interface_to_usbdev(interface);
-	struct device *dev = &usb_dev->dev;
-	struct most_dev *mdev = kzalloc(sizeof(*mdev), GFP_KERNEL);
-	unsigned int i;
-	unsigned int num_endpoints;
-	struct most_channel_capability *tmp_cap;
-	struct usb_endpoint_descriptor *ep_desc;
-	int ret = 0;
-
-	if (!mdev)
-		goto exit_ENOMEM;
-
-	usb_set_intfdata(interface, mdev);
-	num_endpoints = usb_iface_desc->desc.bNumEndpoints;
-	mutex_init(&mdev->io_mutex);
-	INIT_WORK(&mdev->poll_work_obj, wq_netinfo);
-	setup_timer(&mdev->link_stat_timer, link_stat_timer_handler,
-		    (unsigned long)mdev);
-
-	mdev->usb_device = usb_dev;
-	mdev->link_stat_timer.expires = jiffies + (2 * HZ);
-
-	mdev->iface.mod = hdm_usb_fops.owner;
-	mdev->iface.interface = ITYPE_USB;
-	mdev->iface.configure = hdm_configure_channel;
-	mdev->iface.request_netinfo = hdm_request_netinfo;
-	mdev->iface.enqueue = hdm_enqueue;
-	mdev->iface.poison_channel = hdm_poison_channel;
-	mdev->iface.description = mdev->description;
-	mdev->iface.num_channels = num_endpoints;
-
-	snprintf(mdev->description, sizeof(mdev->description),
-		 "usb_device %d-%s:%d.%d",
-		 usb_dev->bus->busnum,
-		 usb_dev->devpath,
-		 usb_dev->config->desc.bConfigurationValue,
-		 usb_iface_desc->desc.bInterfaceNumber);
-
-	mdev->conf = kcalloc(num_endpoints, sizeof(*mdev->conf), GFP_KERNEL);
-	if (!mdev->conf)
-		goto exit_free;
-
-	mdev->cap = kcalloc(num_endpoints, sizeof(*mdev->cap), GFP_KERNEL);
-	if (!mdev->cap)
-		goto exit_free1;
-
-	mdev->iface.channel_vector = mdev->cap;
-	mdev->iface.priv = NULL;
-
-	mdev->ep_address =
-		kcalloc(num_endpoints, sizeof(*mdev->ep_address), GFP_KERNEL);
-	if (!mdev->ep_address)
-		goto exit_free2;
-
-	mdev->busy_urbs =
-		kcalloc(num_endpoints, sizeof(*mdev->busy_urbs), GFP_KERNEL);
-	if (!mdev->busy_urbs)
-		goto exit_free3;
-
-	tmp_cap = mdev->cap;
-	for (i = 0; i < num_endpoints; i++) {
-		ep_desc = &usb_iface_desc->endpoint[i].desc;
-		mdev->ep_address[i] = ep_desc->bEndpointAddress;
-		mdev->padding_active[i] = false;
-		mdev->is_channel_healthy[i] = true;
-
-		snprintf(&mdev->suffix[i][0], MAX_SUFFIX_LEN, "ep%02x",
-			 mdev->ep_address[i]);
-
-		tmp_cap->name_suffix = &mdev->suffix[i][0];
-		tmp_cap->buffer_size_packet = MAX_BUF_SIZE;
-		tmp_cap->buffer_size_streaming = MAX_BUF_SIZE;
-		tmp_cap->num_buffers_packet = BUF_CHAIN_SIZE;
-		tmp_cap->num_buffers_streaming = BUF_CHAIN_SIZE;
-		tmp_cap->data_type = MOST_CH_CONTROL | MOST_CH_ASYNC |
-				     MOST_CH_ISOC | MOST_CH_SYNC;
-		if (usb_endpoint_dir_in(ep_desc))
-			tmp_cap->direction = MOST_CH_RX;
-		else
-			tmp_cap->direction = MOST_CH_TX;
-		tmp_cap++;
-		init_usb_anchor(&mdev->busy_urbs[i]);
-		spin_lock_init(&mdev->channel_lock[i]);
-	}
-	dev_notice(dev, "claimed gadget: Vendor=%4.4x ProdID=%4.4x Bus=%02x Device=%02x\n",
-		   le16_to_cpu(usb_dev->descriptor.idVendor),
-		   le16_to_cpu(usb_dev->descriptor.idProduct),
-		   usb_dev->bus->busnum,
-		   usb_dev->devnum);
-
-	dev_notice(dev, "device path: /sys/bus/usb/devices/%d-%s:%d.%d\n",
-		   usb_dev->bus->busnum,
-		   usb_dev->devpath,
-		   usb_dev->config->desc.bConfigurationValue,
-		   usb_iface_desc->desc.bInterfaceNumber);
-
-	mdev->parent = most_register_interface(&mdev->iface);
-	if (IS_ERR(mdev->parent)) {
-		ret = PTR_ERR(mdev->parent);
-		goto exit_free4;
-	}
-
-	mutex_lock(&mdev->io_mutex);
-	if (le16_to_cpu(usb_dev->descriptor.idProduct) == USB_DEV_ID_OS81118 ||
-	    le16_to_cpu(usb_dev->descriptor.idProduct) == USB_DEV_ID_OS81119 ||
-	    le16_to_cpu(usb_dev->descriptor.idProduct) == USB_DEV_ID_OS81210) {
-		/* this increments the reference count of the instance
-		 * object of the core
-		 */
-		mdev->dci = create_most_dci_obj(mdev->parent);
-		if (!mdev->dci) {
-			mutex_unlock(&mdev->io_mutex);
-			most_deregister_interface(&mdev->iface);
-			ret = -ENOMEM;
-			goto exit_free4;
-		}
-
-		kobject_uevent(&mdev->dci->kobj, KOBJ_ADD);
-		mdev->dci->usb_device = mdev->usb_device;
-	}
-	mutex_unlock(&mdev->io_mutex);
-	return 0;
-
-exit_free4:
-	kfree(mdev->busy_urbs);
-exit_free3:
-	kfree(mdev->ep_address);
-exit_free2:
-	kfree(mdev->cap);
-exit_free1:
-	kfree(mdev->conf);
-exit_free:
-	kfree(mdev);
-exit_ENOMEM:
-	if (ret == 0 || ret == -ENOMEM) {
-		ret = -ENOMEM;
-		dev_err(dev, "out of memory\n");
-	}
-	return ret;
-}
-
-/**
- * hdm_disconnect - disconnect function of USB device driver
- * @interface: Interface of the attached USB device
- *
- * This deregisters the interface with the core, removes the kernel timer
- * and frees resources.
- *
- * Context: hub kernel thread
- */
-static void hdm_disconnect(struct usb_interface *interface)
-{
-	struct most_dev *mdev = usb_get_intfdata(interface);
-
-	mutex_lock(&mdev->io_mutex);
-	usb_set_intfdata(interface, NULL);
-	mdev->usb_device = NULL;
-	mutex_unlock(&mdev->io_mutex);
-
-	del_timer_sync(&mdev->link_stat_timer);
-	cancel_work_sync(&mdev->poll_work_obj);
-
-	destroy_most_dci_obj(mdev->dci);
-	most_deregister_interface(&mdev->iface);
-
-	kfree(mdev->busy_urbs);
-	kfree(mdev->cap);
-	kfree(mdev->conf);
-	kfree(mdev->ep_address);
-	kfree(mdev);
-}
-
-static struct usb_driver hdm_usb = {
-	.name = "hdm_usb",
-	.id_table = usbid,
-	.probe = hdm_probe,
-	.disconnect = hdm_disconnect,
-};
-
-module_usb_driver(hdm_usb);
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Christian Gromm <christian.gromm@xxxxxxxxxxxxx>");
-MODULE_DESCRIPTION("HDM_4_USB");
diff --git a/drivers/staging/most/i2c/Kconfig b/drivers/staging/most/i2c/Kconfig
new file mode 100644
index 0000000..79d0ff2
--- /dev/null
+++ b/drivers/staging/most/i2c/Kconfig
@@ -0,0 +1,12 @@
+#
+# MOST I2C configuration
+#
+
+config MOST_I2C
+	tristate "I2C"
+	depends on I2C
+	---help---
+	  Say Y here if you want to connect to a network transceiver via I2C.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called most_i2c.
diff --git a/drivers/staging/most/i2c/Makefile b/drivers/staging/most/i2c/Makefile
new file mode 100644
index 0000000..a7d094c
--- /dev/null
+++ b/drivers/staging/most/i2c/Makefile
@@ -0,0 +1,4 @@
+obj-$(CONFIG_MOST_I2C) += most_i2c.o
+
+most_i2c-objs := i2c.o
+ccflags-y += -Idrivers/staging/
diff --git a/drivers/staging/most/i2c/i2c.c b/drivers/staging/most/i2c/i2c.c
new file mode 100644
index 0000000..0a3e858
--- /dev/null
+++ b/drivers/staging/most/i2c/i2c.c
@@ -0,0 +1,423 @@
+/*
+ * i2c.c - Hardware Dependent Module for I2C Interface
+ *
+ * Copyright (C) 2013-2015, Microchip Technology Germany II GmbH & Co. KG
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * This file is licensed under GPLv2.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/i2c.h>
+#include <linux/sched.h>
+#include <linux/interrupt.h>
+#include <linux/err.h>
+
+#include <most/core.h>
+
+enum { CH_RX, CH_TX, NUM_CHANNELS };
+
+#define MAX_BUFFERS_CONTROL 32
+#define MAX_BUF_SIZE_CONTROL 256
+
+/**
+ * list_first_mbo - get the first mbo from a list
+ * @ptr:	the list head to take the mbo from.
+ */
+#define list_first_mbo(ptr) \
+	list_first_entry(ptr, struct mbo, list)
+
+/* IRQ / Polling option */
+static bool polling_req;
+module_param(polling_req, bool, 0444);
+MODULE_PARM_DESC(polling_req, "Request Polling. Default = 0 (use irq)");
+
+/* Polling Rate */
+static int scan_rate = 100;
+module_param(scan_rate, int, 0644);
+MODULE_PARM_DESC(scan_rate, "Polling rate in times/sec. Default = 100");
+
+struct hdm_i2c {
+	bool is_open[NUM_CHANNELS];
+	bool polling_mode;
+	struct most_interface most_iface;
+	struct most_channel_capability capabilities[NUM_CHANNELS];
+	struct i2c_client *client;
+	struct rx {
+		struct delayed_work dwork;
+		wait_queue_head_t waitq;
+		struct list_head list;
+		struct mutex list_mutex;
+	} rx;
+	char name[64];
+};
+
+#define to_hdm(iface) container_of(iface, struct hdm_i2c, most_iface)
+
+/**
+ * configure_channel - called from MOST core to configure a channel
+ * @iface: interface the channel belongs to
+ * @channel: channel to be configured
+ * @channel_config: structure that holds the configuration information
+ *
+ * Return 0 on success, negative on failure.
+ *
+ * Receives configuration information from the MOST core and initializes the
+ * corresponding channel.
+ */
+static int configure_channel(struct most_interface *most_iface,
+			     int ch_idx,
+			     struct most_channel_config *channel_config)
+{
+	struct hdm_i2c *dev = to_hdm(most_iface);
+
+	BUG_ON(ch_idx < 0 || ch_idx >= NUM_CHANNELS);
+	BUG_ON(dev->is_open[ch_idx]);
+
+	if (channel_config->data_type != MOST_CH_CONTROL) {
+		pr_err("bad data type for channel %d\n", ch_idx);
+		return -EPERM;
+	}
+
+	if (channel_config->direction != dev->capabilities[ch_idx].direction) {
+		pr_err("bad direction for channel %d\n", ch_idx);
+		return -EPERM;
+	}
+
+	if ((channel_config->direction == MOST_CH_RX) && (dev->polling_mode)) {
+		schedule_delayed_work(&dev->rx.dwork,
+				      msecs_to_jiffies(MSEC_PER_SEC / 4));
+	}
+	dev->is_open[ch_idx] = true;
+
+	return 0;
+}
+
+/**
+ * enqueue - called from MOST core to enqueue a buffer for data transfer
+ * @iface: intended interface
+ * @channel: ID of the channel the buffer is intended for
+ * @mbo: pointer to the buffer object
+ *
+ * Return 0 on success, negative on failure.
+ *
+ * Transmits the data over I2C if it is a "write" request or pushes the buffer
+ * into the list if it is a "read" request
+ */
+static int enqueue(struct most_interface *most_iface,
+		   int ch_idx, struct mbo *mbo)
+{
+	struct hdm_i2c *dev = to_hdm(most_iface);
+	int ret;
+
+	BUG_ON(ch_idx < 0 || ch_idx >= NUM_CHANNELS);
+	BUG_ON(!dev->is_open[ch_idx]);
+
+	if (ch_idx == CH_RX) {
+		/* RX */
+		mutex_lock(&dev->rx.list_mutex);
+		list_add_tail(&mbo->list, &dev->rx.list);
+		mutex_unlock(&dev->rx.list_mutex);
+		wake_up_interruptible(&dev->rx.waitq);
+	} else {
+		/* TX */
+		ret = i2c_master_send(dev->client, mbo->virt_address,
+				      mbo->buffer_length);
+		if (ret <= 0) {
+			mbo->processed_length = 0;
+			mbo->status = MBO_E_INVAL;
+		} else {
+			mbo->processed_length = mbo->buffer_length;
+			mbo->status = MBO_SUCCESS;
+		}
+		mbo->complete(mbo);
+	}
+
+	return 0;
+}
+
+/**
+ * poison_channel - called from MOST core to poison buffers of a channel
+ * @iface: pointer to the interface the channel to be poisoned belongs to
+ * @channel_id: corresponding channel ID
+ *
+ * Return 0 on success, negative on failure.
+ *
+ * If the channel direction is RX, complete the buffers in the list with
+ * status MBO_E_CLOSE
+ */
+static int poison_channel(struct most_interface *most_iface,
+			  int ch_idx)
+{
+	struct hdm_i2c *dev = to_hdm(most_iface);
+	struct mbo *mbo;
+
+	BUG_ON(ch_idx < 0 || ch_idx >= NUM_CHANNELS);
+	BUG_ON(!dev->is_open[ch_idx]);
+
+	dev->is_open[ch_idx] = false;
+
+	if (ch_idx == CH_RX) {
+		mutex_lock(&dev->rx.list_mutex);
+		while (!list_empty(&dev->rx.list)) {
+			mbo = list_first_mbo(&dev->rx.list);
+			list_del(&mbo->list);
+			mutex_unlock(&dev->rx.list_mutex);
+
+			mbo->processed_length = 0;
+			mbo->status = MBO_E_CLOSE;
+			mbo->complete(mbo);
+
+			mutex_lock(&dev->rx.list_mutex);
+		}
+		mutex_unlock(&dev->rx.list_mutex);
+		wake_up_interruptible(&dev->rx.waitq);
+	}
+
+	return 0;
+}
+
+static void do_rx_work(struct hdm_i2c *dev)
+{
+	struct mbo *mbo;
+	unsigned char msg[MAX_BUF_SIZE_CONTROL];
+	int ret, ch_idx = CH_RX;
+	u16 pml, data_size;
+
+	/* Read PML (2 bytes) */
+	ret = i2c_master_recv(dev->client, msg, 2);
+	if (ret <= 0) {
+		pr_err("Failed to receive PML\n");
+		return;
+	}
+
+	pml = (msg[0] << 8) | msg[1];
+	if (!pml)
+		return;
+
+	data_size = pml + 2;
+
+	/* Read the whole message, including PML */
+	ret = i2c_master_recv(dev->client, msg, data_size);
+	if (ret <= 0) {
+		pr_err("Failed to receive a Port Message\n");
+		return;
+	}
+
+	for (;;) {
+		/* Conditions to wait for: poisoned channel or free buffer
+		 * available for reading
+		 */
+		if (wait_event_interruptible(dev->rx.waitq,
+					     !dev->is_open[ch_idx] ||
+					     !list_empty(&dev->rx.list))) {
+			pr_err("wait_event_interruptible() failed\n");
+			return;
+		}
+
+		if (!dev->is_open[ch_idx])
+			return;
+
+		mutex_lock(&dev->rx.list_mutex);
+
+		/* list may be empty if poison or remove is called */
+		if (!list_empty(&dev->rx.list))
+			break;
+
+		mutex_unlock(&dev->rx.list_mutex);
+	}
+
+	mbo = list_first_mbo(&dev->rx.list);
+	list_del(&mbo->list);
+	mutex_unlock(&dev->rx.list_mutex);
+
+	mbo->processed_length = min(data_size, mbo->buffer_length);
+	memcpy(mbo->virt_address, msg, mbo->processed_length);
+	mbo->status = MBO_SUCCESS;
+	mbo->complete(mbo);
+}
+
+/**
+ * pending_rx_work - Read pending messages through I2C
+ * @work: definition of this work item
+ *
+ * Invoked by the Interrupt Service Routine, most_irq_handler()
+ */
+static void pending_rx_work(struct work_struct *work)
+{
+	struct hdm_i2c *dev = container_of(work, struct hdm_i2c, rx.dwork.work);
+
+	do_rx_work(dev);
+
+	if (dev->polling_mode) {
+		if (dev->is_open[CH_RX])
+			schedule_delayed_work(&dev->rx.dwork,
+					      msecs_to_jiffies(MSEC_PER_SEC
+							       / scan_rate));
+	} else {
+		enable_irq(dev->client->irq);
+	}
+}
+
+/*
+ * most_irq_handler - Interrupt Service Routine
+ * @irq: irq number
+ * @_dev: private data
+ *
+ * Schedules a delayed work
+ *
+ * By default the interrupt line is active low. Once the device generates an
+ * interrupt, it keeps the line low until the driver clears the interrupt by
+ * reading the PMP message. Since the I2C read is done in a work queue, the
+ * interrupt line must be disabled temporarily to avoid the ISR being called
+ * repeatedly. The interrupt is re-enabled in the work queue after the message
+ * has been read.
+ *
+ * Note: If we use the interrupt line in Falling edge mode, there is a
+ * possibility to miss interrupts when ISR is getting executed.
+ *
+ */
+static irqreturn_t most_irq_handler(int irq, void *_dev)
+{
+	struct hdm_i2c *dev = _dev;
+
+	disable_irq_nosync(irq);
+
+	schedule_delayed_work(&dev->rx.dwork, 0);
+
+	return IRQ_HANDLED;
+}
+
+/*
+ * i2c_probe - i2c probe handler
+ * @client: i2c client device structure
+ * @id: i2c client device id
+ *
+ * Return 0 on success, negative on failure.
+ *
+ * Register the i2c client device as a MOST interface
+ */
+static int i2c_probe(struct i2c_client *client, const struct i2c_device_id *id)
+{
+	struct hdm_i2c *dev;
+	int ret, i;
+	struct kobject *kobj;
+
+	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+	if (!dev)
+		return -ENOMEM;
+
+	/* ID format: i2c-<bus>-<address> */
+	snprintf(dev->name, sizeof(dev->name), "i2c-%d-%04x",
+		 client->adapter->nr, client->addr);
+
+	for (i = 0; i < NUM_CHANNELS; i++) {
+		dev->is_open[i] = false;
+		dev->capabilities[i].data_type = MOST_CH_CONTROL;
+		dev->capabilities[i].num_buffers_packet = MAX_BUFFERS_CONTROL;
+		dev->capabilities[i].buffer_size_packet = MAX_BUF_SIZE_CONTROL;
+	}
+	dev->capabilities[CH_RX].direction = MOST_CH_RX;
+	dev->capabilities[CH_RX].name_suffix = "rx";
+	dev->capabilities[CH_TX].direction = MOST_CH_TX;
+	dev->capabilities[CH_TX].name_suffix = "tx";
+
+	dev->most_iface.interface = ITYPE_I2C;
+	dev->most_iface.description = dev->name;
+	dev->most_iface.num_channels = NUM_CHANNELS;
+	dev->most_iface.channel_vector = dev->capabilities;
+	dev->most_iface.configure = configure_channel;
+	dev->most_iface.enqueue = enqueue;
+	dev->most_iface.poison_channel = poison_channel;
+
+	INIT_LIST_HEAD(&dev->rx.list);
+	mutex_init(&dev->rx.list_mutex);
+	init_waitqueue_head(&dev->rx.waitq);
+
+	INIT_DELAYED_WORK(&dev->rx.dwork, pending_rx_work);
+
+	dev->client = client;
+	i2c_set_clientdata(client, dev);
+
+	kobj = most_register_interface(&dev->most_iface);
+	if (IS_ERR(kobj)) {
+		pr_err("Failed to register i2c as a MOST interface\n");
+		kfree(dev);
+		return PTR_ERR(kobj);
+	}
+
+	dev->polling_mode = polling_req || client->irq <= 0;
+	if (!dev->polling_mode) {
+		pr_info("Requesting IRQ: %d\n", client->irq);
+		ret = request_irq(client->irq, most_irq_handler, 0,
+				  client->name, dev);
+		if (ret) {
+			pr_info("IRQ request failed: %d, falling back to polling\n",
+				ret);
+			dev->polling_mode = true;
+		}
+	}
+
+	if (dev->polling_mode)
+		pr_info("Using polling at rate: %d times/sec\n", scan_rate);
+
+	return 0;
+}
+
+/*
+ * i2c_remove - i2c remove handler
+ * @client: i2c client device structure
+ *
+ * Return 0 on success.
+ *
+ * Unregister the i2c client device as a MOST interface
+ */
+static int i2c_remove(struct i2c_client *client)
+{
+	struct hdm_i2c *dev = i2c_get_clientdata(client);
+	int i;
+
+	if (!dev->polling_mode)
+		free_irq(client->irq, dev);
+
+	most_deregister_interface(&dev->most_iface);
+
+	for (i = 0 ; i < NUM_CHANNELS; i++)
+		if (dev->is_open[i])
+			poison_channel(&dev->most_iface, i);
+	cancel_delayed_work_sync(&dev->rx.dwork);
+	kfree(dev);
+
+	return 0;
+}
+
+static const struct i2c_device_id i2c_id[] = {
+	{ "most_i2c", 0 },
+	{ }, /* Terminating entry */
+};
+
+MODULE_DEVICE_TABLE(i2c, i2c_id);
+
+static struct i2c_driver i2c_driver = {
+	.driver = {
+		.name = "hdm_i2c",
+	},
+	.probe = i2c_probe,
+	.remove = i2c_remove,
+	.id_table = i2c_id,
+};
+
+module_i2c_driver(i2c_driver);
+
+MODULE_AUTHOR("Jain Roy Ambi <JainRoy.Ambi@xxxxxxxxxxxxx>");
+MODULE_AUTHOR("Andrey Shvetsov <andrey.shvetsov@xxxxxx>");
+MODULE_DESCRIPTION("I2C Hardware Dependent Module");
+MODULE_LICENSE("GPL");
diff --git a/drivers/staging/most/mostcore/Kconfig b/drivers/staging/most/mostcore/Kconfig
deleted file mode 100644
index 4717254..0000000
--- a/drivers/staging/most/mostcore/Kconfig
+++ /dev/null
@@ -1,14 +0,0 @@
-#
-# MOSTCore configuration
-#
-
-config MOSTCORE
-	tristate "MOST Core"
-	depends on HAS_DMA
-
-	---help---
-	  Say Y here if you want to enable MOST support.
-	  This device driver needs at least an additional AIM and HDM to work.
-
-	  To compile this driver as a module, choose M here: the
-	  module will be called mostcore.
diff --git a/drivers/staging/most/mostcore/Makefile b/drivers/staging/most/mostcore/Makefile
deleted file mode 100644
index a078f01..0000000
--- a/drivers/staging/most/mostcore/Makefile
+++ /dev/null
@@ -1,3 +0,0 @@
-obj-$(CONFIG_MOSTCORE) += mostcore.o
-
-mostcore-objs := core.o
diff --git a/drivers/staging/most/mostcore/core.c b/drivers/staging/most/mostcore/core.c
deleted file mode 100644
index 069269d..0000000
--- a/drivers/staging/most/mostcore/core.c
+++ /dev/null
@@ -1,1949 +0,0 @@
-/*
- * core.c - Implementation of core module of MOST Linux driver stack
- *
- * Copyright (C) 2013-2015 Microchip Technology Germany II GmbH & Co. KG
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * This file is licensed under GPLv2.
- */
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-#include <linux/module.h>
-#include <linux/fs.h>
-#include <linux/slab.h>
-#include <linux/init.h>
-#include <linux/device.h>
-#include <linux/list.h>
-#include <linux/poll.h>
-#include <linux/wait.h>
-#include <linux/kobject.h>
-#include <linux/mutex.h>
-#include <linux/completion.h>
-#include <linux/sysfs.h>
-#include <linux/kthread.h>
-#include <linux/dma-mapping.h>
-#include <linux/idr.h>
-#include "mostcore.h"
-
-#define MAX_CHANNELS	64
-#define STRING_SIZE	80
-
-static struct class *most_class;
-static struct device *core_dev;
-static struct ida mdev_id;
-static int dummy_num_buffers;
-
-struct most_c_aim_obj {
-	struct most_aim *ptr;
-	int refs;
-	int num_buffers;
-};
-
-struct most_c_obj {
-	struct kobject kobj;
-	struct completion cleanup;
-	atomic_t mbo_ref;
-	atomic_t mbo_nq_level;
-	u16 channel_id;
-	bool is_poisoned;
-	struct mutex start_mutex;
-	struct mutex nq_mutex; /* nq thread synchronization */
-	int is_starving;
-	struct most_interface *iface;
-	struct most_inst_obj *inst;
-	struct most_channel_config cfg;
-	bool keep_mbo;
-	bool enqueue_halt;
-	struct list_head fifo;
-	spinlock_t fifo_lock;
-	struct list_head halt_fifo;
-	struct list_head list;
-	struct most_c_aim_obj aim0;
-	struct most_c_aim_obj aim1;
-	struct list_head trash_fifo;
-	struct task_struct *hdm_enqueue_task;
-	wait_queue_head_t hdm_fifo_wq;
-};
-
-#define to_c_obj(d) container_of(d, struct most_c_obj, kobj)
-
-struct most_inst_obj {
-	int dev_id;
-	struct most_interface *iface;
-	struct list_head channel_list;
-	struct most_c_obj *channel[MAX_CHANNELS];
-	struct kobject kobj;
-	struct list_head list;
-};
-
-static const struct {
-	int most_ch_data_type;
-	const char *name;
-} ch_data_type[] = {
-	{ MOST_CH_CONTROL, "control\n" },
-	{ MOST_CH_ASYNC, "async\n" },
-	{ MOST_CH_SYNC, "sync\n" },
-	{ MOST_CH_ISOC, "isoc\n"},
-	{ MOST_CH_ISOC, "isoc_avp\n"},
-};
-
-#define to_inst_obj(d) container_of(d, struct most_inst_obj, kobj)
-
-/**
- * list_pop_mbo - retrieves the first MBO of the list and removes it
- * @ptr: the list head to grab the MBO from.
- */
-#define list_pop_mbo(ptr)						\
-({									\
-	struct mbo *_mbo = list_first_entry(ptr, struct mbo, list);	\
-	list_del(&_mbo->list);						\
-	_mbo;								\
-})
-
-/*		     ___	     ___
- *		     ___C H A N N E L___
- */
-
-/**
- * struct most_c_attr - to access the attributes of a channel object
- * @attr: attributes of a channel
- * @show: pointer to the show function
- * @store: pointer to the store function
- */
-struct most_c_attr {
-	struct attribute attr;
-	ssize_t (*show)(struct most_c_obj *d,
-			struct most_c_attr *attr,
-			char *buf);
-	ssize_t (*store)(struct most_c_obj *d,
-			 struct most_c_attr *attr,
-			 const char *buf,
-			 size_t count);
-};
-
-#define to_channel_attr(a) container_of(a, struct most_c_attr, attr)
-
-/**
- * channel_attr_show - show function of channel object
- * @kobj: pointer to its kobject
- * @attr: pointer to its attributes
- * @buf: buffer
- */
-static ssize_t channel_attr_show(struct kobject *kobj, struct attribute *attr,
-				 char *buf)
-{
-	struct most_c_attr *channel_attr = to_channel_attr(attr);
-	struct most_c_obj *c_obj = to_c_obj(kobj);
-
-	if (!channel_attr->show)
-		return -EIO;
-
-	return channel_attr->show(c_obj, channel_attr, buf);
-}
-
-/**
- * channel_attr_store - store function of channel object
- * @kobj: pointer to its kobject
- * @attr: pointer to its attributes
- * @buf: buffer
- * @len: length of buffer
- */
-static ssize_t channel_attr_store(struct kobject *kobj,
-				  struct attribute *attr,
-				  const char *buf,
-				  size_t len)
-{
-	struct most_c_attr *channel_attr = to_channel_attr(attr);
-	struct most_c_obj *c_obj = to_c_obj(kobj);
-
-	if (!channel_attr->store)
-		return -EIO;
-	return channel_attr->store(c_obj, channel_attr, buf, len);
-}
-
-static const struct sysfs_ops most_channel_sysfs_ops = {
-	.show = channel_attr_show,
-	.store = channel_attr_store,
-};
-
-/**
- * most_free_mbo_coherent - free an MBO and its coherent buffer
- * @mbo: buffer to be released
- *
- */
-static void most_free_mbo_coherent(struct mbo *mbo)
-{
-	struct most_c_obj *c = mbo->context;
-	u16 const coherent_buf_size = c->cfg.buffer_size + c->cfg.extra_len;
-
-	dma_free_coherent(NULL, coherent_buf_size, mbo->virt_address,
-			  mbo->bus_address);
-	kfree(mbo);
-	if (atomic_sub_and_test(1, &c->mbo_ref))
-		complete(&c->cleanup);
-}
-
-/**
- * flush_channel_fifos - clear the channel fifos
- * @c: pointer to channel object
- */
-static void flush_channel_fifos(struct most_c_obj *c)
-{
-	unsigned long flags, hf_flags;
-	struct mbo *mbo, *tmp;
-
-	if (list_empty(&c->fifo) && list_empty(&c->halt_fifo))
-		return;
-
-	spin_lock_irqsave(&c->fifo_lock, flags);
-	list_for_each_entry_safe(mbo, tmp, &c->fifo, list) {
-		list_del(&mbo->list);
-		spin_unlock_irqrestore(&c->fifo_lock, flags);
-		most_free_mbo_coherent(mbo);
-		spin_lock_irqsave(&c->fifo_lock, flags);
-	}
-	spin_unlock_irqrestore(&c->fifo_lock, flags);
-
-	spin_lock_irqsave(&c->fifo_lock, hf_flags);
-	list_for_each_entry_safe(mbo, tmp, &c->halt_fifo, list) {
-		list_del(&mbo->list);
-		spin_unlock_irqrestore(&c->fifo_lock, hf_flags);
-		most_free_mbo_coherent(mbo);
-		spin_lock_irqsave(&c->fifo_lock, hf_flags);
-	}
-	spin_unlock_irqrestore(&c->fifo_lock, hf_flags);
-
-	if (unlikely((!list_empty(&c->fifo) || !list_empty(&c->halt_fifo))))
-		pr_info("WARN: fifo | trash fifo not empty\n");
-}
-
-/**
- * flush_trash_fifo - clear the trash fifo
- * @c: pointer to channel object
- */
-static int flush_trash_fifo(struct most_c_obj *c)
-{
-	struct mbo *mbo, *tmp;
-	unsigned long flags;
-
-	spin_lock_irqsave(&c->fifo_lock, flags);
-	list_for_each_entry_safe(mbo, tmp, &c->trash_fifo, list) {
-		list_del(&mbo->list);
-		spin_unlock_irqrestore(&c->fifo_lock, flags);
-		most_free_mbo_coherent(mbo);
-		spin_lock_irqsave(&c->fifo_lock, flags);
-	}
-	spin_unlock_irqrestore(&c->fifo_lock, flags);
-	return 0;
-}
-
-/**
- * most_channel_release - release function of channel object
- * @kobj: pointer to channel's kobject
- */
-static void most_channel_release(struct kobject *kobj)
-{
-	struct most_c_obj *c = to_c_obj(kobj);
-
-	kfree(c);
-}
-
-static ssize_t available_directions_show(struct most_c_obj *c,
-					 struct most_c_attr *attr,
-					 char *buf)
-{
-	unsigned int i = c->channel_id;
-
-	strcpy(buf, "");
-	if (c->iface->channel_vector[i].direction & MOST_CH_RX)
-		strcat(buf, "rx ");
-	if (c->iface->channel_vector[i].direction & MOST_CH_TX)
-		strcat(buf, "tx ");
-	strcat(buf, "\n");
-	return strlen(buf);
-}
-
-static ssize_t available_datatypes_show(struct most_c_obj *c,
-					struct most_c_attr *attr,
-					char *buf)
-{
-	unsigned int i = c->channel_id;
-
-	strcpy(buf, "");
-	if (c->iface->channel_vector[i].data_type & MOST_CH_CONTROL)
-		strcat(buf, "control ");
-	if (c->iface->channel_vector[i].data_type & MOST_CH_ASYNC)
-		strcat(buf, "async ");
-	if (c->iface->channel_vector[i].data_type & MOST_CH_SYNC)
-		strcat(buf, "sync ");
-	if (c->iface->channel_vector[i].data_type & MOST_CH_ISOC)
-		strcat(buf, "isoc ");
-	strcat(buf, "\n");
-	return strlen(buf);
-}
-
-static ssize_t number_of_packet_buffers_show(struct most_c_obj *c,
-					     struct most_c_attr *attr,
-					     char *buf)
-{
-	unsigned int i = c->channel_id;
-
-	return snprintf(buf, PAGE_SIZE, "%d\n",
-			c->iface->channel_vector[i].num_buffers_packet);
-}
-
-static ssize_t number_of_stream_buffers_show(struct most_c_obj *c,
-					     struct most_c_attr *attr,
-					     char *buf)
-{
-	unsigned int i = c->channel_id;
-
-	return snprintf(buf, PAGE_SIZE, "%d\n",
-			c->iface->channel_vector[i].num_buffers_streaming);
-}
-
-static ssize_t size_of_packet_buffer_show(struct most_c_obj *c,
-					  struct most_c_attr *attr,
-					  char *buf)
-{
-	unsigned int i = c->channel_id;
-
-	return snprintf(buf, PAGE_SIZE, "%d\n",
-			c->iface->channel_vector[i].buffer_size_packet);
-}
-
-static ssize_t size_of_stream_buffer_show(struct most_c_obj *c,
-					  struct most_c_attr *attr,
-					  char *buf)
-{
-	unsigned int i = c->channel_id;
-
-	return snprintf(buf, PAGE_SIZE, "%d\n",
-			c->iface->channel_vector[i].buffer_size_streaming);
-}
-
-static ssize_t channel_starving_show(struct most_c_obj *c,
-				     struct most_c_attr *attr,
-				     char *buf)
-{
-	return snprintf(buf, PAGE_SIZE, "%d\n", c->is_starving);
-}
-
-static ssize_t set_number_of_buffers_show(struct most_c_obj *c,
-					  struct most_c_attr *attr,
-					  char *buf)
-{
-	return snprintf(buf, PAGE_SIZE, "%d\n", c->cfg.num_buffers);
-}
-
-static ssize_t set_number_of_buffers_store(struct most_c_obj *c,
-					   struct most_c_attr *attr,
-					   const char *buf,
-					   size_t count)
-{
-	int ret = kstrtou16(buf, 0, &c->cfg.num_buffers);
-
-	if (ret)
-		return ret;
-	return count;
-}
-
-static ssize_t set_buffer_size_show(struct most_c_obj *c,
-				    struct most_c_attr *attr,
-				    char *buf)
-{
-	return snprintf(buf, PAGE_SIZE, "%d\n", c->cfg.buffer_size);
-}
-
-static ssize_t set_buffer_size_store(struct most_c_obj *c,
-				     struct most_c_attr *attr,
-				     const char *buf,
-				     size_t count)
-{
-	int ret = kstrtou16(buf, 0, &c->cfg.buffer_size);
-
-	if (ret)
-		return ret;
-	return count;
-}
-
-static ssize_t set_direction_show(struct most_c_obj *c,
-				  struct most_c_attr *attr,
-				  char *buf)
-{
-	if (c->cfg.direction & MOST_CH_TX)
-		return snprintf(buf, PAGE_SIZE, "tx\n");
-	else if (c->cfg.direction & MOST_CH_RX)
-		return snprintf(buf, PAGE_SIZE, "rx\n");
-	return snprintf(buf, PAGE_SIZE, "unconfigured\n");
-}
-
-static ssize_t set_direction_store(struct most_c_obj *c,
-				   struct most_c_attr *attr,
-				   const char *buf,
-				   size_t count)
-{
-	if (!strcmp(buf, "dir_rx\n")) {
-		c->cfg.direction = MOST_CH_RX;
-	} else if (!strcmp(buf, "rx\n")) {
-		c->cfg.direction = MOST_CH_RX;
-	} else if (!strcmp(buf, "dir_tx\n")) {
-		c->cfg.direction = MOST_CH_TX;
-	} else if (!strcmp(buf, "tx\n")) {
-		c->cfg.direction = MOST_CH_TX;
-	} else {
-		pr_info("WARN: invalid attribute settings\n");
-		return -EINVAL;
-	}
-	return count;
-}
-
-static ssize_t set_datatype_show(struct most_c_obj *c,
-				 struct most_c_attr *attr,
-				 char *buf)
-{
-	int i;
-
-	for (i = 0; i < ARRAY_SIZE(ch_data_type); i++) {
-		if (c->cfg.data_type & ch_data_type[i].most_ch_data_type)
-			return snprintf(buf, PAGE_SIZE, ch_data_type[i].name);
-	}
-	return snprintf(buf, PAGE_SIZE, "unconfigured\n");
-}
-
-static ssize_t set_datatype_store(struct most_c_obj *c,
-				  struct most_c_attr *attr,
-				  const char *buf,
-				  size_t count)
-{
-	int i;
-
-	for (i = 0; i < ARRAY_SIZE(ch_data_type); i++) {
-		if (!strcmp(buf, ch_data_type[i].name)) {
-			c->cfg.data_type = ch_data_type[i].most_ch_data_type;
-			break;
-		}
-	}
-
-	if (i == ARRAY_SIZE(ch_data_type)) {
-		pr_info("WARN: invalid attribute settings\n");
-		return -EINVAL;
-	}
-	return count;
-}
-
-static ssize_t set_subbuffer_size_show(struct most_c_obj *c,
-				       struct most_c_attr *attr,
-				       char *buf)
-{
-	return snprintf(buf, PAGE_SIZE, "%d\n", c->cfg.subbuffer_size);
-}
-
-static ssize_t set_subbuffer_size_store(struct most_c_obj *c,
-					struct most_c_attr *attr,
-					const char *buf,
-					size_t count)
-{
-	int ret = kstrtou16(buf, 0, &c->cfg.subbuffer_size);
-
-	if (ret)
-		return ret;
-	return count;
-}
-
-static ssize_t set_packets_per_xact_show(struct most_c_obj *c,
-					 struct most_c_attr *attr,
-					 char *buf)
-{
-	return snprintf(buf, PAGE_SIZE, "%d\n", c->cfg.packets_per_xact);
-}
-
-static ssize_t set_packets_per_xact_store(struct most_c_obj *c,
-					  struct most_c_attr *attr,
-					  const char *buf,
-					  size_t count)
-{
-	int ret = kstrtou16(buf, 0, &c->cfg.packets_per_xact);
-
-	if (ret)
-		return ret;
-	return count;
-}
-
-static struct most_c_attr most_c_attrs[] = {
-	__ATTR_RO(available_directions),
-	__ATTR_RO(available_datatypes),
-	__ATTR_RO(number_of_packet_buffers),
-	__ATTR_RO(number_of_stream_buffers),
-	__ATTR_RO(size_of_stream_buffer),
-	__ATTR_RO(size_of_packet_buffer),
-	__ATTR_RO(channel_starving),
-	__ATTR_RW(set_buffer_size),
-	__ATTR_RW(set_number_of_buffers),
-	__ATTR_RW(set_direction),
-	__ATTR_RW(set_datatype),
-	__ATTR_RW(set_subbuffer_size),
-	__ATTR_RW(set_packets_per_xact),
-};
-
-/**
- * most_channel_def_attrs - array of default attributes of channel object
- */
-static struct attribute *most_channel_def_attrs[] = {
-	&most_c_attrs[0].attr,
-	&most_c_attrs[1].attr,
-	&most_c_attrs[2].attr,
-	&most_c_attrs[3].attr,
-	&most_c_attrs[4].attr,
-	&most_c_attrs[5].attr,
-	&most_c_attrs[6].attr,
-	&most_c_attrs[7].attr,
-	&most_c_attrs[8].attr,
-	&most_c_attrs[9].attr,
-	&most_c_attrs[10].attr,
-	&most_c_attrs[11].attr,
-	&most_c_attrs[12].attr,
-	NULL,
-};
-
-static struct kobj_type most_channel_ktype = {
-	.sysfs_ops = &most_channel_sysfs_ops,
-	.release = most_channel_release,
-	.default_attrs = most_channel_def_attrs,
-};
-
-static struct kset *most_channel_kset;
-
-/**
- * create_most_c_obj - allocates a channel object
- * @name: name of the channel object
- * @parent: parent kobject
- *
- * This create a channel object and registers it with sysfs.
- * Returns a pointer to the object or NULL when something went wrong.
- */
-static struct most_c_obj *
-create_most_c_obj(const char *name, struct kobject *parent)
-{
-	struct most_c_obj *c;
-	int retval;
-
-	c = kzalloc(sizeof(*c), GFP_KERNEL);
-	if (!c)
-		return NULL;
-	c->kobj.kset = most_channel_kset;
-	retval = kobject_init_and_add(&c->kobj, &most_channel_ktype, parent,
-				      "%s", name);
-	if (retval) {
-		kobject_put(&c->kobj);
-		return NULL;
-	}
-	kobject_uevent(&c->kobj, KOBJ_ADD);
-	return c;
-}
-
-/*		     ___	       ___
- *		     ___I N S T A N C E___
- */
-
-static struct list_head instance_list;
-
-/**
- * struct most_inst_attribute - to access the attributes of instance object
- * @attr: attributes of an instance
- * @show: pointer to the show function
- * @store: pointer to the store function
- */
-struct most_inst_attribute {
-	struct attribute attr;
-	ssize_t (*show)(struct most_inst_obj *d,
-			struct most_inst_attribute *attr,
-			char *buf);
-	ssize_t (*store)(struct most_inst_obj *d,
-			 struct most_inst_attribute *attr,
-			 const char *buf,
-			 size_t count);
-};
-
-#define to_instance_attr(a) \
-	container_of(a, struct most_inst_attribute, attr)
-
-/**
- * instance_attr_show - show function for an instance object
- * @kobj: pointer to kobject
- * @attr: pointer to attribute struct
- * @buf: buffer
- */
-static ssize_t instance_attr_show(struct kobject *kobj,
-				  struct attribute *attr,
-				  char *buf)
-{
-	struct most_inst_attribute *instance_attr;
-	struct most_inst_obj *instance_obj;
-
-	instance_attr = to_instance_attr(attr);
-	instance_obj = to_inst_obj(kobj);
-
-	if (!instance_attr->show)
-		return -EIO;
-
-	return instance_attr->show(instance_obj, instance_attr, buf);
-}
-
-/**
- * instance_attr_store - store function for an instance object
- * @kobj: pointer to kobject
- * @attr: pointer to attribute struct
- * @buf: buffer
- * @len: length of buffer
- */
-static ssize_t instance_attr_store(struct kobject *kobj,
-				   struct attribute *attr,
-				   const char *buf,
-				   size_t len)
-{
-	struct most_inst_attribute *instance_attr;
-	struct most_inst_obj *instance_obj;
-
-	instance_attr = to_instance_attr(attr);
-	instance_obj = to_inst_obj(kobj);
-
-	if (!instance_attr->store)
-		return -EIO;
-
-	return instance_attr->store(instance_obj, instance_attr, buf, len);
-}
-
-static const struct sysfs_ops most_inst_sysfs_ops = {
-	.show = instance_attr_show,
-	.store = instance_attr_store,
-};
-
-/**
- * most_inst_release - release function for instance object
- * @kobj: pointer to instance's kobject
- *
- * This frees the allocated memory for the instance object
- */
-static void most_inst_release(struct kobject *kobj)
-{
-	struct most_inst_obj *inst = to_inst_obj(kobj);
-
-	kfree(inst);
-}
-
-static ssize_t description_show(struct most_inst_obj *instance_obj,
-				struct most_inst_attribute *attr,
-				char *buf)
-{
-	return snprintf(buf, PAGE_SIZE, "%s\n",
-			instance_obj->iface->description);
-}
-
-static ssize_t interface_show(struct most_inst_obj *instance_obj,
-			      struct most_inst_attribute *attr,
-			      char *buf)
-{
-	switch (instance_obj->iface->interface) {
-	case ITYPE_LOOPBACK:
-		return snprintf(buf, PAGE_SIZE, "loopback\n");
-	case ITYPE_I2C:
-		return snprintf(buf, PAGE_SIZE, "i2c\n");
-	case ITYPE_I2S:
-		return snprintf(buf, PAGE_SIZE, "i2s\n");
-	case ITYPE_TSI:
-		return snprintf(buf, PAGE_SIZE, "tsi\n");
-	case ITYPE_HBI:
-		return snprintf(buf, PAGE_SIZE, "hbi\n");
-	case ITYPE_MEDIALB_DIM:
-		return snprintf(buf, PAGE_SIZE, "mlb_dim\n");
-	case ITYPE_MEDIALB_DIM2:
-		return snprintf(buf, PAGE_SIZE, "mlb_dim2\n");
-	case ITYPE_USB:
-		return snprintf(buf, PAGE_SIZE, "usb\n");
-	case ITYPE_PCIE:
-		return snprintf(buf, PAGE_SIZE, "pcie\n");
-	}
-	return snprintf(buf, PAGE_SIZE, "unknown\n");
-}
-
-static struct most_inst_attribute most_inst_attr_description =
-	__ATTR_RO(description);
-
-static struct most_inst_attribute most_inst_attr_interface =
-	__ATTR_RO(interface);
-
-static struct attribute *most_inst_def_attrs[] = {
-	&most_inst_attr_description.attr,
-	&most_inst_attr_interface.attr,
-	NULL,
-};
-
-static struct kobj_type most_inst_ktype = {
-	.sysfs_ops = &most_inst_sysfs_ops,
-	.release = most_inst_release,
-	.default_attrs = most_inst_def_attrs,
-};
-
-static struct kset *most_inst_kset;
-
-/**
- * create_most_inst_obj - creates an instance object
- * @name: name of the object to be created
- *
- * This allocates memory for an instance structure, assigns the proper kset
- * and registers it with sysfs.
- *
- * Returns a pointer to the instance object or NULL when something went wrong.
- */
-static struct most_inst_obj *create_most_inst_obj(const char *name)
-{
-	struct most_inst_obj *inst;
-	int retval;
-
-	inst = kzalloc(sizeof(*inst), GFP_KERNEL);
-	if (!inst)
-		return NULL;
-	inst->kobj.kset = most_inst_kset;
-	retval = kobject_init_and_add(&inst->kobj, &most_inst_ktype, NULL,
-				      "%s", name);
-	if (retval) {
-		kobject_put(&inst->kobj);
-		return NULL;
-	}
-	kobject_uevent(&inst->kobj, KOBJ_ADD);
-	return inst;
-}
-
-/**
- * destroy_most_inst_obj - MOST instance release function
- * @inst: pointer to the instance object
- *
- * This decrements the reference counter of the instance object.
- * If the reference count turns zero, its release function is called
- */
-static void destroy_most_inst_obj(struct most_inst_obj *inst)
-{
-	struct most_c_obj *c, *tmp;
-
-	list_for_each_entry_safe(c, tmp, &inst->channel_list, list) {
-		flush_trash_fifo(c);
-		flush_channel_fifos(c);
-		kobject_put(&c->kobj);
-	}
-	kobject_put(&inst->kobj);
-}
-
-/*		     ___     ___
- *		     ___A I M___
- */
-struct most_aim_obj {
-	struct kobject kobj;
-	struct list_head list;
-	struct most_aim *driver;
-};
-
-#define to_aim_obj(d) container_of(d, struct most_aim_obj, kobj)
-
-static struct list_head aim_list;
-
-/**
- * struct most_aim_attribute - to access the attributes of AIM object
- * @attr: attributes of an AIM
- * @show: pointer to the show function
- * @store: pointer to the store function
- */
-struct most_aim_attribute {
-	struct attribute attr;
-	ssize_t (*show)(struct most_aim_obj *d,
-			struct most_aim_attribute *attr,
-			char *buf);
-	ssize_t (*store)(struct most_aim_obj *d,
-			 struct most_aim_attribute *attr,
-			 const char *buf,
-			 size_t count);
-};
-
-#define to_aim_attr(a) container_of(a, struct most_aim_attribute, attr)
-
-/**
- * aim_attr_show - show function of an AIM object
- * @kobj: pointer to kobject
- * @attr: pointer to attribute struct
- * @buf: buffer
- */
-static ssize_t aim_attr_show(struct kobject *kobj,
-			     struct attribute *attr,
-			     char *buf)
-{
-	struct most_aim_attribute *aim_attr;
-	struct most_aim_obj *aim_obj;
-
-	aim_attr = to_aim_attr(attr);
-	aim_obj = to_aim_obj(kobj);
-
-	if (!aim_attr->show)
-		return -EIO;
-
-	return aim_attr->show(aim_obj, aim_attr, buf);
-}
-
-/**
- * aim_attr_store - store function of an AIM object
- * @kobj: pointer to kobject
- * @attr: pointer to attribute struct
- * @buf: buffer
- * @len: length of buffer
- */
-static ssize_t aim_attr_store(struct kobject *kobj,
-			      struct attribute *attr,
-			      const char *buf,
-			      size_t len)
-{
-	struct most_aim_attribute *aim_attr;
-	struct most_aim_obj *aim_obj;
-
-	aim_attr = to_aim_attr(attr);
-	aim_obj = to_aim_obj(kobj);
-
-	if (!aim_attr->store)
-		return -EIO;
-	return aim_attr->store(aim_obj, aim_attr, buf, len);
-}
-
-static const struct sysfs_ops most_aim_sysfs_ops = {
-	.show = aim_attr_show,
-	.store = aim_attr_store,
-};
-
-/**
- * most_aim_release - AIM release function
- * @kobj: pointer to AIM's kobject
- */
-static void most_aim_release(struct kobject *kobj)
-{
-	struct most_aim_obj *aim_obj = to_aim_obj(kobj);
-
-	kfree(aim_obj);
-}
-
-static ssize_t links_show(struct most_aim_obj *aim_obj,
-			  struct most_aim_attribute *attr,
-			  char *buf)
-{
-	struct most_c_obj *c;
-	struct most_inst_obj *i;
-	int offs = 0;
-
-	list_for_each_entry(i, &instance_list, list) {
-		list_for_each_entry(c, &i->channel_list, list) {
-			if (c->aim0.ptr == aim_obj->driver ||
-			    c->aim1.ptr == aim_obj->driver) {
-				offs += snprintf(buf + offs, PAGE_SIZE - offs,
-						 "%s:%s\n",
-						 kobject_name(&i->kobj),
-						 kobject_name(&c->kobj));
-			}
-		}
-	}
-
-	return offs;
-}
-
-/**
- * split_string - parses and changes string in the buffer buf and
- * splits it into two mandatory and one optional substrings.
- *
- * @buf: complete string from attribute 'add_channel'
- * @a: address of pointer to 1st substring (=instance name)
- * @b: address of pointer to 2nd substring (=channel name)
- * @c: optional address of pointer to 3rd substring (=user defined name)
- *
- * Examples:
- *
- * Input: "mdev0:ch6:my_channel\n" or
- *        "mdev0:ch6:my_channel"
- *
- * Output: *a -> "mdev0", *b -> "ch6", *c -> "my_channel"
- *
- * Input: "mdev1:ep81\n"
- * Output: *a -> "mdev1", *b -> "ep81", *c -> ""
- *
- * Input: "mdev1:ep81"
- * Output: *a -> "mdev1", *b -> "ep81", *c == NULL
- */
-static int split_string(char *buf, char **a, char **b, char **c)
-{
-	*a = strsep(&buf, ":");
-	if (!*a)
-		return -EIO;
-
-	*b = strsep(&buf, ":\n");
-	if (!*b)
-		return -EIO;
-
-	if (c)
-		*c = strsep(&buf, ":\n");
-
-	return 0;
-}
-
-/**
- * get_channel_by_name - get pointer to channel object
- * @mdev: name of the device instance
- * @mdev_ch: name of the respective channel
- *
- * This retrieves the pointer to a channel object.
- */
-static struct
-most_c_obj *get_channel_by_name(char *mdev, char *mdev_ch)
-{
-	struct most_c_obj *c, *tmp;
-	struct most_inst_obj *i, *i_tmp;
-	int found = 0;
-
-	list_for_each_entry_safe(i, i_tmp, &instance_list, list) {
-		if (!strcmp(kobject_name(&i->kobj), mdev)) {
-			found++;
-			break;
-		}
-	}
-	if (unlikely(!found))
-		return ERR_PTR(-EIO);
-
-	list_for_each_entry_safe(c, tmp, &i->channel_list, list) {
-		if (!strcmp(kobject_name(&c->kobj), mdev_ch)) {
-			found++;
-			break;
-		}
-	}
-	if (unlikely(found < 2))
-		return ERR_PTR(-EIO);
-	return c;
-}
-
-/**
- * add_link_store - store() function for add_link attribute
- * @aim_obj: pointer to AIM object
- * @attr: its attributes
- * @buf: buffer
- * @len: buffer length
- *
- * This parses the string given by buf and splits it into
- * three substrings. Note: third substring is optional. In case a cdev
- * AIM is loaded the optional 3rd substring will make up the name of
- * device node in the /dev directory. If omitted, the device node will
- * inherit the channel's name within sysfs.
- *
- * Searches for a pair of device and channel and probes the AIM
- *
- * Example:
- * (1) echo "mdev0:ch6:my_rxchannel" >add_link
- * (2) echo "mdev1:ep81" >add_link
- *
- * (1) would create the device node /dev/my_rxchannel
- * (2) would create the device node /dev/mdev1-ep81
- */
-static ssize_t add_link_store(struct most_aim_obj *aim_obj,
-			      struct most_aim_attribute *attr,
-			      const char *buf,
-			      size_t len)
-{
-	struct most_c_obj *c;
-	struct most_aim **aim_ptr;
-	char buffer[STRING_SIZE];
-	char *mdev;
-	char *mdev_ch;
-	char *mdev_devnod;
-	char devnod_buf[STRING_SIZE];
-	int ret;
-	size_t max_len = min_t(size_t, len + 1, STRING_SIZE);
-
-	strlcpy(buffer, buf, max_len);
-
-	ret = split_string(buffer, &mdev, &mdev_ch, &mdev_devnod);
-	if (ret)
-		return ret;
-
-	if (!mdev_devnod || *mdev_devnod == 0) {
-		snprintf(devnod_buf, sizeof(devnod_buf), "%s-%s", mdev,
-			 mdev_ch);
-		mdev_devnod = devnod_buf;
-	}
-
-	c = get_channel_by_name(mdev, mdev_ch);
-	if (IS_ERR(c))
-		return -ENODEV;
-
-	if (!c->aim0.ptr)
-		aim_ptr = &c->aim0.ptr;
-	else if (!c->aim1.ptr)
-		aim_ptr = &c->aim1.ptr;
-	else
-		return -ENOSPC;
-
-	*aim_ptr = aim_obj->driver;
-	ret = aim_obj->driver->probe_channel(c->iface, c->channel_id,
-					     &c->cfg, &c->kobj, mdev_devnod);
-	if (ret) {
-		*aim_ptr = NULL;
-		return ret;
-	}
-
-	return len;
-}
-
-/**
- * remove_link_store - store function for remove_link attribute
- * @aim_obj: pointer to AIM object
- * @attr: its attributes
- * @buf: buffer
- * @len: buffer length
- *
- * Example:
- * echo "mdev0:ep81" >remove_link
- */
-static ssize_t remove_link_store(struct most_aim_obj *aim_obj,
-				 struct most_aim_attribute *attr,
-				 const char *buf,
-				 size_t len)
-{
-	struct most_c_obj *c;
-	char buffer[STRING_SIZE];
-	char *mdev;
-	char *mdev_ch;
-	int ret;
-	size_t max_len = min_t(size_t, len + 1, STRING_SIZE);
-
-	strlcpy(buffer, buf, max_len);
-	ret = split_string(buffer, &mdev, &mdev_ch, NULL);
-	if (ret)
-		return ret;
-
-	c = get_channel_by_name(mdev, mdev_ch);
-	if (IS_ERR(c))
-		return -ENODEV;
-
-	if (aim_obj->driver->disconnect_channel(c->iface, c->channel_id))
-		return -EIO;
-	if (c->aim0.ptr == aim_obj->driver)
-		c->aim0.ptr = NULL;
-	if (c->aim1.ptr == aim_obj->driver)
-		c->aim1.ptr = NULL;
-	return len;
-}
-
-static struct most_aim_attribute most_aim_attrs[] = {
-	__ATTR_RO(links),
-	__ATTR_WO(add_link),
-	__ATTR_WO(remove_link),
-};
-
-static struct attribute *most_aim_def_attrs[] = {
-	&most_aim_attrs[0].attr,
-	&most_aim_attrs[1].attr,
-	&most_aim_attrs[2].attr,
-	NULL,
-};
-
-static struct kobj_type most_aim_ktype = {
-	.sysfs_ops = &most_aim_sysfs_ops,
-	.release = most_aim_release,
-	.default_attrs = most_aim_def_attrs,
-};
-
-static struct kset *most_aim_kset;
-
-/**
- * create_most_aim_obj - creates an AIM object
- * @name: name of the AIM
- *
- * This creates an AIM object assigns the proper kset and registers
- * it with sysfs.
- * Returns a pointer to the object or NULL if something went wrong.
- */
-static struct most_aim_obj *create_most_aim_obj(const char *name)
-{
-	struct most_aim_obj *most_aim;
-	int retval;
-
-	most_aim = kzalloc(sizeof(*most_aim), GFP_KERNEL);
-	if (!most_aim)
-		return NULL;
-	most_aim->kobj.kset = most_aim_kset;
-	retval = kobject_init_and_add(&most_aim->kobj, &most_aim_ktype,
-				      NULL, "%s", name);
-	if (retval) {
-		kobject_put(&most_aim->kobj);
-		return NULL;
-	}
-	kobject_uevent(&most_aim->kobj, KOBJ_ADD);
-	return most_aim;
-}
-
-/**
- * destroy_most_aim_obj - AIM release function
- * @p: pointer to AIM object
- *
- * This decrements the reference counter of the AIM object. If the
- * reference count turns zero, its release function will be called.
- */
-static void destroy_most_aim_obj(struct most_aim_obj *p)
-{
-	kobject_put(&p->kobj);
-}
-
-/*		     ___       ___
- *		     ___C O R E___
- */
-
-/**
- * Instantiation of the MOST bus
- */
-static struct bus_type most_bus = {
-	.name = "most",
-};
-
-/**
- * Instantiation of the core driver
- */
-static struct device_driver mostcore = {
-	.name = "mostcore",
-	.bus = &most_bus,
-};
-
-static inline void trash_mbo(struct mbo *mbo)
-{
-	unsigned long flags;
-	struct most_c_obj *c = mbo->context;
-
-	spin_lock_irqsave(&c->fifo_lock, flags);
-	list_add(&mbo->list, &c->trash_fifo);
-	spin_unlock_irqrestore(&c->fifo_lock, flags);
-}
-
-static bool hdm_mbo_ready(struct most_c_obj *c)
-{
-	bool empty;
-
-	if (c->enqueue_halt)
-		return false;
-
-	spin_lock_irq(&c->fifo_lock);
-	empty = list_empty(&c->halt_fifo);
-	spin_unlock_irq(&c->fifo_lock);
-
-	return !empty;
-}
-
-static void nq_hdm_mbo(struct mbo *mbo)
-{
-	unsigned long flags;
-	struct most_c_obj *c = mbo->context;
-
-	spin_lock_irqsave(&c->fifo_lock, flags);
-	list_add_tail(&mbo->list, &c->halt_fifo);
-	spin_unlock_irqrestore(&c->fifo_lock, flags);
-	wake_up_interruptible(&c->hdm_fifo_wq);
-}
-
-static int hdm_enqueue_thread(void *data)
-{
-	struct most_c_obj *c = data;
-	struct mbo *mbo;
-	int ret;
-	typeof(c->iface->enqueue) enqueue = c->iface->enqueue;
-
-	while (likely(!kthread_should_stop())) {
-		wait_event_interruptible(c->hdm_fifo_wq,
-					 hdm_mbo_ready(c) ||
-					 kthread_should_stop());
-
-		mutex_lock(&c->nq_mutex);
-		spin_lock_irq(&c->fifo_lock);
-		if (unlikely(c->enqueue_halt || list_empty(&c->halt_fifo))) {
-			spin_unlock_irq(&c->fifo_lock);
-			mutex_unlock(&c->nq_mutex);
-			continue;
-		}
-
-		mbo = list_pop_mbo(&c->halt_fifo);
-		spin_unlock_irq(&c->fifo_lock);
-
-		if (c->cfg.direction == MOST_CH_RX)
-			mbo->buffer_length = c->cfg.buffer_size;
-
-		ret = enqueue(mbo->ifp, mbo->hdm_channel_id, mbo);
-		mutex_unlock(&c->nq_mutex);
-
-		if (unlikely(ret)) {
-			pr_err("hdm enqueue failed\n");
-			nq_hdm_mbo(mbo);
-			c->hdm_enqueue_task = NULL;
-			return 0;
-		}
-	}
-
-	return 0;
-}
-
-static int run_enqueue_thread(struct most_c_obj *c, int channel_id)
-{
-	struct task_struct *task =
-		kthread_run(hdm_enqueue_thread, c, "hdm_fifo_%d",
-			    channel_id);
-
-	if (IS_ERR(task))
-		return PTR_ERR(task);
-
-	c->hdm_enqueue_task = task;
-	return 0;
-}
-
-/**
- * arm_mbo - recycle MBO for further usage
- * @mbo: buffer object
- *
- * This puts an MBO back to the list to have it ready for up coming
- * tx transactions.
- *
- * In case the MBO belongs to a channel that recently has been
- * poisoned, the MBO is scheduled to be trashed.
- * Calls the completion handler of an attached AIM.
- */
-static void arm_mbo(struct mbo *mbo)
-{
-	unsigned long flags;
-	struct most_c_obj *c;
-
-	BUG_ON((!mbo) || (!mbo->context));
-	c = mbo->context;
-
-	if (c->is_poisoned) {
-		trash_mbo(mbo);
-		return;
-	}
-
-	spin_lock_irqsave(&c->fifo_lock, flags);
-	++*mbo->num_buffers_ptr;
-	list_add_tail(&mbo->list, &c->fifo);
-	spin_unlock_irqrestore(&c->fifo_lock, flags);
-
-	if (c->aim0.refs && c->aim0.ptr->tx_completion)
-		c->aim0.ptr->tx_completion(c->iface, c->channel_id);
-
-	if (c->aim1.refs && c->aim1.ptr->tx_completion)
-		c->aim1.ptr->tx_completion(c->iface, c->channel_id);
-}
-
-/**
- * arm_mbo_chain - helper function that arms an MBO chain for the HDM
- * @c: pointer to interface channel
- * @dir: direction of the channel
- * @compl: pointer to completion function
- *
- * This allocates buffer objects including the containing DMA coherent
- * buffer and puts them in the fifo.
- * Buffers of Rx channels are put in the kthread fifo, hence immediately
- * submitted to the HDM.
- *
- * Returns the number of allocated and enqueued MBOs.
- */
-static int arm_mbo_chain(struct most_c_obj *c, int dir,
-			 void (*compl)(struct mbo *))
-{
-	unsigned int i;
-	int retval;
-	struct mbo *mbo;
-	u32 coherent_buf_size = c->cfg.buffer_size + c->cfg.extra_len;
-
-	atomic_set(&c->mbo_nq_level, 0);
-
-	for (i = 0; i < c->cfg.num_buffers; i++) {
-		mbo = kzalloc(sizeof(*mbo), GFP_KERNEL);
-		if (!mbo) {
-			retval = i;
-			goto _exit;
-		}
-		mbo->context = c;
-		mbo->ifp = c->iface;
-		mbo->hdm_channel_id = c->channel_id;
-		mbo->virt_address = dma_alloc_coherent(NULL,
-						       coherent_buf_size,
-						       &mbo->bus_address,
-						       GFP_KERNEL);
-		if (!mbo->virt_address) {
-			pr_info("WARN: No DMA coherent buffer.\n");
-			retval = i;
-			goto _error1;
-		}
-		mbo->complete = compl;
-		mbo->num_buffers_ptr = &dummy_num_buffers;
-		if (dir == MOST_CH_RX) {
-			nq_hdm_mbo(mbo);
-			atomic_inc(&c->mbo_nq_level);
-		} else {
-			arm_mbo(mbo);
-		}
-	}
-	return i;
-
-_error1:
-	kfree(mbo);
-_exit:
-	return retval;
-}
-
-/**
- * most_submit_mbo - submits an MBO to fifo
- * @mbo: pointer to the MBO
- */
-void most_submit_mbo(struct mbo *mbo)
-{
-	if (WARN_ONCE(!mbo || !mbo->context,
-		      "bad mbo or missing channel reference\n"))
-		return;
-
-	nq_hdm_mbo(mbo);
-}
-EXPORT_SYMBOL_GPL(most_submit_mbo);
-
-/**
- * most_write_completion - write completion handler
- * @mbo: pointer to MBO
- *
- * This recycles the MBO for further usage. In case the channel has been
- * poisoned, the MBO is scheduled to be trashed.
- */
-static void most_write_completion(struct mbo *mbo)
-{
-	struct most_c_obj *c;
-
-	BUG_ON((!mbo) || (!mbo->context));
-
-	c = mbo->context;
-	if (mbo->status == MBO_E_INVAL)
-		pr_info("WARN: Tx MBO status: invalid\n");
-	if (unlikely(c->is_poisoned || (mbo->status == MBO_E_CLOSE)))
-		trash_mbo(mbo);
-	else
-		arm_mbo(mbo);
-}
-
-/**
- * get_channel_by_iface - get pointer to channel object
- * @iface: pointer to interface instance
- * @id: channel ID
- *
- * This retrieves a pointer to a channel of the given interface and channel ID.
- */
-static struct
-most_c_obj *get_channel_by_iface(struct most_interface *iface, int id)
-{
-	struct most_inst_obj *i;
-
-	if (unlikely(!iface)) {
-		pr_err("Bad interface\n");
-		return NULL;
-	}
-	if (unlikely((id < 0) || (id >= iface->num_channels))) {
-		pr_err("Channel index (%d) out of range\n", id);
-		return NULL;
-	}
-	i = iface->priv;
-	if (unlikely(!i)) {
-		pr_err("interface is not registered\n");
-		return NULL;
-	}
-	return i->channel[id];
-}
-
-int channel_has_mbo(struct most_interface *iface, int id, struct most_aim *aim)
-{
-	struct most_c_obj *c = get_channel_by_iface(iface, id);
-	unsigned long flags;
-	int empty;
-
-	if (unlikely(!c))
-		return -EINVAL;
-
-	if (c->aim0.refs && c->aim1.refs &&
-	    ((aim == c->aim0.ptr && c->aim0.num_buffers <= 0) ||
-	     (aim == c->aim1.ptr && c->aim1.num_buffers <= 0)))
-		return 0;
-
-	spin_lock_irqsave(&c->fifo_lock, flags);
-	empty = list_empty(&c->fifo);
-	spin_unlock_irqrestore(&c->fifo_lock, flags);
-	return !empty;
-}
-EXPORT_SYMBOL_GPL(channel_has_mbo);
-
-/**
- * most_get_mbo - get pointer to an MBO of pool
- * @iface: pointer to interface instance
- * @id: channel ID
- *
- * This attempts to get a free buffer out of the channel fifo.
- * Returns a pointer to MBO on success or NULL otherwise.
- */
-struct mbo *most_get_mbo(struct most_interface *iface, int id,
-			 struct most_aim *aim)
-{
-	struct mbo *mbo;
-	struct most_c_obj *c;
-	unsigned long flags;
-	int *num_buffers_ptr;
-
-	c = get_channel_by_iface(iface, id);
-	if (unlikely(!c))
-		return NULL;
-
-	if (c->aim0.refs && c->aim1.refs &&
-	    ((aim == c->aim0.ptr && c->aim0.num_buffers <= 0) ||
-	     (aim == c->aim1.ptr && c->aim1.num_buffers <= 0)))
-		return NULL;
-
-	if (aim == c->aim0.ptr)
-		num_buffers_ptr = &c->aim0.num_buffers;
-	else if (aim == c->aim1.ptr)
-		num_buffers_ptr = &c->aim1.num_buffers;
-	else
-		num_buffers_ptr = &dummy_num_buffers;
-
-	spin_lock_irqsave(&c->fifo_lock, flags);
-	if (list_empty(&c->fifo)) {
-		spin_unlock_irqrestore(&c->fifo_lock, flags);
-		return NULL;
-	}
-	mbo = list_pop_mbo(&c->fifo);
-	--*num_buffers_ptr;
-	spin_unlock_irqrestore(&c->fifo_lock, flags);
-
-	mbo->num_buffers_ptr = num_buffers_ptr;
-	mbo->buffer_length = c->cfg.buffer_size;
-	return mbo;
-}
-EXPORT_SYMBOL_GPL(most_get_mbo);
-
-/**
- * most_put_mbo - return buffer to pool
- * @mbo: buffer object
- */
-void most_put_mbo(struct mbo *mbo)
-{
-	struct most_c_obj *c = mbo->context;
-
-	if (c->cfg.direction == MOST_CH_TX) {
-		arm_mbo(mbo);
-		return;
-	}
-	nq_hdm_mbo(mbo);
-	atomic_inc(&c->mbo_nq_level);
-}
-EXPORT_SYMBOL_GPL(most_put_mbo);
-
-/**
- * most_read_completion - read completion handler
- * @mbo: pointer to MBO
- *
- * This function is called by the HDM when data has been received from the
- * hardware and copied to the buffer of the MBO.
- *
- * In case the channel has been poisoned it puts the buffer in the trash queue.
- * Otherwise, it passes the buffer to an AIM for further processing.
- */
-static void most_read_completion(struct mbo *mbo)
-{
-	struct most_c_obj *c = mbo->context;
-
-	if (unlikely(c->is_poisoned || (mbo->status == MBO_E_CLOSE))) {
-		trash_mbo(mbo);
-		return;
-	}
-
-	if (mbo->status == MBO_E_INVAL) {
-		nq_hdm_mbo(mbo);
-		atomic_inc(&c->mbo_nq_level);
-		return;
-	}
-
-	if (atomic_sub_and_test(1, &c->mbo_nq_level))
-		c->is_starving = 1;
-
-	if (c->aim0.refs && c->aim0.ptr->rx_completion &&
-	    c->aim0.ptr->rx_completion(mbo) == 0)
-		return;
-
-	if (c->aim1.refs && c->aim1.ptr->rx_completion &&
-	    c->aim1.ptr->rx_completion(mbo) == 0)
-		return;
-
-	most_put_mbo(mbo);
-}
-
-/**
- * most_start_channel - prepares a channel for communication
- * @iface: pointer to interface instance
- * @id: channel ID
- *
- * This prepares the channel for usage. Cross-checks whether the
- * channel's been properly configured.
- *
- * Returns 0 on success or error code otherwise.
- */
-int most_start_channel(struct most_interface *iface, int id,
-		       struct most_aim *aim)
-{
-	int num_buffer;
-	int ret;
-	struct most_c_obj *c = get_channel_by_iface(iface, id);
-
-	if (unlikely(!c))
-		return -EINVAL;
-
-	mutex_lock(&c->start_mutex);
-	if (c->aim0.refs + c->aim1.refs > 0)
-		goto out; /* already started by other aim */
-
-	if (!try_module_get(iface->mod)) {
-		pr_info("failed to acquire HDM lock\n");
-		mutex_unlock(&c->start_mutex);
-		return -ENOLCK;
-	}
-
-	c->cfg.extra_len = 0;
-	if (c->iface->configure(c->iface, c->channel_id, &c->cfg)) {
-		pr_info("channel configuration failed. Go check settings...\n");
-		ret = -EINVAL;
-		goto error;
-	}
-
-	init_waitqueue_head(&c->hdm_fifo_wq);
-
-	if (c->cfg.direction == MOST_CH_RX)
-		num_buffer = arm_mbo_chain(c, c->cfg.direction,
-					   most_read_completion);
-	else
-		num_buffer = arm_mbo_chain(c, c->cfg.direction,
-					   most_write_completion);
-	if (unlikely(!num_buffer)) {
-		pr_info("failed to allocate memory\n");
-		ret = -ENOMEM;
-		goto error;
-	}
-
-	ret = run_enqueue_thread(c, id);
-	if (ret)
-		goto error;
-
-	c->is_starving = 0;
-	c->aim0.num_buffers = c->cfg.num_buffers / 2;
-	c->aim1.num_buffers = c->cfg.num_buffers - c->aim0.num_buffers;
-	atomic_set(&c->mbo_ref, num_buffer);
-
-out:
-	if (aim == c->aim0.ptr)
-		c->aim0.refs++;
-	if (aim == c->aim1.ptr)
-		c->aim1.refs++;
-	mutex_unlock(&c->start_mutex);
-	return 0;
-
-error:
-	module_put(iface->mod);
-	mutex_unlock(&c->start_mutex);
-	return ret;
-}
-EXPORT_SYMBOL_GPL(most_start_channel);
-
-/**
- * most_stop_channel - stops a running channel
- * @iface: pointer to interface instance
- * @id: channel ID
- */
-int most_stop_channel(struct most_interface *iface, int id,
-		      struct most_aim *aim)
-{
-	struct most_c_obj *c;
-
-	if (unlikely((!iface) || (id >= iface->num_channels) || (id < 0))) {
-		pr_err("Bad interface or index out of range\n");
-		return -EINVAL;
-	}
-	c = get_channel_by_iface(iface, id);
-	if (unlikely(!c))
-		return -EINVAL;
-
-	mutex_lock(&c->start_mutex);
-	if (c->aim0.refs + c->aim1.refs >= 2)
-		goto out;
-
-	if (c->hdm_enqueue_task)
-		kthread_stop(c->hdm_enqueue_task);
-	c->hdm_enqueue_task = NULL;
-
-	if (iface->mod)
-		module_put(iface->mod);
-
-	c->is_poisoned = true;
-	if (c->iface->poison_channel(c->iface, c->channel_id)) {
-		pr_err("Cannot stop channel %d of mdev %s\n", c->channel_id,
-		       c->iface->description);
-		mutex_unlock(&c->start_mutex);
-		return -EAGAIN;
-	}
-	flush_trash_fifo(c);
-	flush_channel_fifos(c);
-
-#ifdef CMPL_INTERRUPTIBLE
-	if (wait_for_completion_interruptible(&c->cleanup)) {
-		pr_info("Interrupted while clean up ch %d\n", c->channel_id);
-		mutex_unlock(&c->start_mutex);
-		return -EINTR;
-	}
-#else
-	wait_for_completion(&c->cleanup);
-#endif
-	c->is_poisoned = false;
-
-out:
-	if (aim == c->aim0.ptr)
-		c->aim0.refs--;
-	if (aim == c->aim1.ptr)
-		c->aim1.refs--;
-	mutex_unlock(&c->start_mutex);
-	return 0;
-}
-EXPORT_SYMBOL_GPL(most_stop_channel);
-
-/**
- * most_register_aim - registers an AIM (driver) with the core
- * @aim: instance of AIM to be registered
- */
-int most_register_aim(struct most_aim *aim)
-{
-	struct most_aim_obj *aim_obj;
-
-	if (!aim) {
-		pr_err("Bad driver\n");
-		return -EINVAL;
-	}
-	aim_obj = create_most_aim_obj(aim->name);
-	if (!aim_obj) {
-		pr_info("failed to alloc driver object\n");
-		return -ENOMEM;
-	}
-	aim_obj->driver = aim;
-	aim->context = aim_obj;
-	pr_info("registered new application interfacing module %s\n",
-		aim->name);
-	list_add_tail(&aim_obj->list, &aim_list);
-	return 0;
-}
-EXPORT_SYMBOL_GPL(most_register_aim);
-
-/**
- * most_deregister_aim - deregisters an AIM (driver) with the core
- * @aim: AIM to be removed
- */
-int most_deregister_aim(struct most_aim *aim)
-{
-	struct most_aim_obj *aim_obj;
-	struct most_c_obj *c, *tmp;
-	struct most_inst_obj *i, *i_tmp;
-
-	if (!aim) {
-		pr_err("Bad driver\n");
-		return -EINVAL;
-	}
-
-	aim_obj = aim->context;
-	if (!aim_obj) {
-		pr_info("driver not registered.\n");
-		return -EINVAL;
-	}
-	list_for_each_entry_safe(i, i_tmp, &instance_list, list) {
-		list_for_each_entry_safe(c, tmp, &i->channel_list, list) {
-			if (c->aim0.ptr == aim || c->aim1.ptr == aim)
-				aim->disconnect_channel(
-					c->iface, c->channel_id);
-			if (c->aim0.ptr == aim)
-				c->aim0.ptr = NULL;
-			if (c->aim1.ptr == aim)
-				c->aim1.ptr = NULL;
-		}
-	}
-	list_del(&aim_obj->list);
-	destroy_most_aim_obj(aim_obj);
-	pr_info("deregistering application interfacing module %s\n", aim->name);
-	return 0;
-}
-EXPORT_SYMBOL_GPL(most_deregister_aim);
-
-/**
- * most_register_interface - registers an interface with core
- * @iface: pointer to the instance of the interface description.
- *
- * Allocates and initializes a new interface instance and all of its channels.
- * Returns a pointer to kobject or an error pointer.
- */
-struct kobject *most_register_interface(struct most_interface *iface)
-{
-	unsigned int i;
-	int id;
-	char name[STRING_SIZE];
-	char channel_name[STRING_SIZE];
-	struct most_c_obj *c;
-	struct most_inst_obj *inst;
-
-	if (!iface || !iface->enqueue || !iface->configure ||
-	    !iface->poison_channel || (iface->num_channels > MAX_CHANNELS)) {
-		pr_err("Bad interface or channel overflow\n");
-		return ERR_PTR(-EINVAL);
-	}
-
-	id = ida_simple_get(&mdev_id, 0, 0, GFP_KERNEL);
-	if (id < 0) {
-		pr_info("Failed to alloc mdev ID\n");
-		return ERR_PTR(id);
-	}
-	snprintf(name, STRING_SIZE, "mdev%d", id);
-
-	inst = create_most_inst_obj(name);
-	if (!inst) {
-		pr_info("Failed to allocate interface instance\n");
-		ida_simple_remove(&mdev_id, id);
-		return ERR_PTR(-ENOMEM);
-	}
-
-	iface->priv = inst;
-	INIT_LIST_HEAD(&inst->channel_list);
-	inst->iface = iface;
-	inst->dev_id = id;
-	list_add_tail(&inst->list, &instance_list);
-
-	for (i = 0; i < iface->num_channels; i++) {
-		const char *name_suffix = iface->channel_vector[i].name_suffix;
-
-		if (!name_suffix)
-			snprintf(channel_name, STRING_SIZE, "ch%d", i);
-		else
-			snprintf(channel_name, STRING_SIZE, "%s", name_suffix);
-
-		/* this increments the reference count of this instance */
-		c = create_most_c_obj(channel_name, &inst->kobj);
-		if (!c)
-			goto free_instance;
-		inst->channel[i] = c;
-		c->is_starving = 0;
-		c->iface = iface;
-		c->inst = inst;
-		c->channel_id = i;
-		c->keep_mbo = false;
-		c->enqueue_halt = false;
-		c->is_poisoned = false;
-		c->cfg.direction = 0;
-		c->cfg.data_type = 0;
-		c->cfg.num_buffers = 0;
-		c->cfg.buffer_size = 0;
-		c->cfg.subbuffer_size = 0;
-		c->cfg.packets_per_xact = 0;
-		spin_lock_init(&c->fifo_lock);
-		INIT_LIST_HEAD(&c->fifo);
-		INIT_LIST_HEAD(&c->trash_fifo);
-		INIT_LIST_HEAD(&c->halt_fifo);
-		init_completion(&c->cleanup);
-		atomic_set(&c->mbo_ref, 0);
-		mutex_init(&c->start_mutex);
-		mutex_init(&c->nq_mutex);
-		list_add_tail(&c->list, &inst->channel_list);
-	}
-	pr_info("registered new MOST device mdev%d (%s)\n",
-		inst->dev_id, iface->description);
-	return &inst->kobj;
-
-free_instance:
-	pr_info("Failed to allocate channel(s)\n");
-	list_del(&inst->list);
-	ida_simple_remove(&mdev_id, id);
-	destroy_most_inst_obj(inst);
-	return ERR_PTR(-ENOMEM);
-}
-EXPORT_SYMBOL_GPL(most_register_interface);
-
-/**
- * most_deregister_interface - deregisters an interface with core
- * @iface: pointer to the interface instance description.
- *
- * Before removing an interface instance from the list, all running
- * channels are stopped and poisoned.
- */
-void most_deregister_interface(struct most_interface *iface)
-{
-	struct most_inst_obj *i = iface->priv;
-	struct most_c_obj *c;
-
-	if (unlikely(!i)) {
-		pr_info("Bad Interface\n");
-		return;
-	}
-	pr_info("deregistering MOST device %s (%s)\n", i->kobj.name,
-		iface->description);
-
-	list_for_each_entry(c, &i->channel_list, list) {
-		if (c->aim0.ptr)
-			c->aim0.ptr->disconnect_channel(c->iface,
-							c->channel_id);
-		if (c->aim1.ptr)
-			c->aim1.ptr->disconnect_channel(c->iface,
-							c->channel_id);
-		c->aim0.ptr = NULL;
-		c->aim1.ptr = NULL;
-	}
-
-	ida_simple_remove(&mdev_id, i->dev_id);
-	list_del(&i->list);
-	destroy_most_inst_obj(i);
-}
-EXPORT_SYMBOL_GPL(most_deregister_interface);
-
-/**
- * most_stop_enqueue - prevents core from enqueueing MBOs
- * @iface: pointer to interface
- * @id: channel id
- *
- * This is called by an HDM that _cannot_ attend to its duties and
- * is about to be overrun by the core. The core will not enqueue
- * any further packets unless the flagging HDM calls
- * most_resume_enqueue().
- */
-void most_stop_enqueue(struct most_interface *iface, int id)
-{
-	struct most_c_obj *c = get_channel_by_iface(iface, id);
-
-	if (!c)
-		return;
-
-	mutex_lock(&c->nq_mutex);
-	c->enqueue_halt = true;
-	mutex_unlock(&c->nq_mutex);
-}
-EXPORT_SYMBOL_GPL(most_stop_enqueue);
-
-/**
- * most_resume_enqueue - allow core to enqueue MBOs again
- * @iface: pointer to interface
- * @id: channel id
- *
- * This clears the enqueue halt flag and enqueues all MBOs currently
- * sitting in the wait fifo.
- */
-void most_resume_enqueue(struct most_interface *iface, int id)
-{
-	struct most_c_obj *c = get_channel_by_iface(iface, id);
-
-	if (!c)
-		return;
-
-	mutex_lock(&c->nq_mutex);
-	c->enqueue_halt = false;
-	mutex_unlock(&c->nq_mutex);
-
-	wake_up_interruptible(&c->hdm_fifo_wq);
-}
-EXPORT_SYMBOL_GPL(most_resume_enqueue);
-
-static int __init most_init(void)
-{
-	int err;
-
-	pr_info("init()\n");
-	INIT_LIST_HEAD(&instance_list);
-	INIT_LIST_HEAD(&aim_list);
-	ida_init(&mdev_id);
-
-	err = bus_register(&most_bus);
-	if (err) {
-		pr_info("Cannot register most bus\n");
-		return err;
-	}
-
-	most_class = class_create(THIS_MODULE, "most");
-	if (IS_ERR(most_class)) {
-		pr_info("No udev support.\n");
-		err = PTR_ERR(most_class);
-		goto exit_bus;
-	}
-
-	err = driver_register(&mostcore);
-	if (err) {
-		pr_info("Cannot register core driver\n");
-		goto exit_class;
-	}
-
-	core_dev = device_create(most_class, NULL, 0, NULL, "mostcore");
-	if (IS_ERR(core_dev)) {
-		err = PTR_ERR(core_dev);
-		goto exit_driver;
-	}
-
-	most_aim_kset = kset_create_and_add("aims", NULL, &core_dev->kobj);
-	if (!most_aim_kset) {
-		err = -ENOMEM;
-		goto exit_class_container;
-	}
-
-	most_inst_kset = kset_create_and_add("devices", NULL, &core_dev->kobj);
-	if (!most_inst_kset) {
-		err = -ENOMEM;
-		goto exit_driver_kset;
-	}
-
-	return 0;
-
-exit_driver_kset:
-	kset_unregister(most_aim_kset);
-exit_class_container:
-	device_destroy(most_class, 0);
-exit_driver:
-	driver_unregister(&mostcore);
-exit_class:
-	class_destroy(most_class);
-exit_bus:
-	bus_unregister(&most_bus);
-	return err;
-}
-
-static void __exit most_exit(void)
-{
-	struct most_inst_obj *i, *i_tmp;
-	struct most_aim_obj *d, *d_tmp;
-
-	pr_info("exit core module\n");
-	list_for_each_entry_safe(d, d_tmp, &aim_list, list) {
-		destroy_most_aim_obj(d);
-	}
-
-	list_for_each_entry_safe(i, i_tmp, &instance_list, list) {
-		list_del(&i->list);
-		destroy_most_inst_obj(i);
-	}
-	kset_unregister(most_inst_kset);
-	kset_unregister(most_aim_kset);
-	device_destroy(most_class, 0);
-	driver_unregister(&mostcore);
-	class_destroy(most_class);
-	bus_unregister(&most_bus);
-	ida_destroy(&mdev_id);
-}
-
-module_init(most_init);
-module_exit(most_exit);
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Christian Gromm <christian.gromm@xxxxxxxxxxxxx>");
-MODULE_DESCRIPTION("Core module of stacked MOST Linux driver");
diff --git a/drivers/staging/most/mostcore/mostcore.h b/drivers/staging/most/mostcore/mostcore.h
deleted file mode 100644
index 915e515..0000000
--- a/drivers/staging/most/mostcore/mostcore.h
+++ /dev/null
@@ -1,325 +0,0 @@
-/*
- * mostcore.h - Interface between MostCore,
- *   Hardware Dependent Module (HDM) and Application Interface Module (AIM).
- *
- * Copyright (C) 2013-2015, Microchip Technology Germany II GmbH & Co. KG
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * This file is licensed under GPLv2.
- */
-
-/*
- * Authors:
- *   Andrey Shvetsov <andrey.shvetsov@xxxxxx>
- *   Christian Gromm <christian.gromm@xxxxxxxxxxxxx>
- *   Sebastian Graf
- */
-
-#ifndef __MOST_CORE_H__
-#define __MOST_CORE_H__
-
-#include <linux/types.h>
-
-struct kobject;
-struct module;
-
-/**
- * Interface type
- */
-enum most_interface_type {
-	ITYPE_LOOPBACK = 1,
-	ITYPE_I2C,
-	ITYPE_I2S,
-	ITYPE_TSI,
-	ITYPE_HBI,
-	ITYPE_MEDIALB_DIM,
-	ITYPE_MEDIALB_DIM2,
-	ITYPE_USB,
-	ITYPE_PCIE
-};
-
-/**
- * Channel direction.
- */
-enum most_channel_direction {
-	MOST_CH_RX = 1 << 0,
-	MOST_CH_TX = 1 << 1,
-};
-
-/**
- * Channel data type.
- */
-enum most_channel_data_type {
-	MOST_CH_CONTROL = 1 << 0,
-	MOST_CH_ASYNC = 1 << 1,
-	MOST_CH_ISOC = 1 << 2,
-	MOST_CH_SYNC = 1 << 5,
-};
-
-enum mbo_status_flags {
-	/* MBO was processed successfully (data was sent or received) */
-	MBO_SUCCESS = 0,
-	/* The MBO contains wrong or missing information.  */
-	MBO_E_INVAL,
-	/* MBO was completed as HDM Channel will be closed */
-	MBO_E_CLOSE,
-};
-
-/**
- * struct most_channel_capability - Channel capability
- * @direction: Supported channel directions.
- * The value is bitwise OR-combination of the values from the
- * enumeration most_channel_direction. Zero is allowed value and means
- * "channel may not be used".
- * @data_type: Supported channel data types.
- * The value is bitwise OR-combination of the values from the
- * enumeration most_channel_data_type. Zero is allowed value and means
- * "channel may not be used".
- * @num_buffers_packet: Maximum number of buffers supported by this channel
- * for packet data types (Async,Control,QoS)
- * @buffer_size_packet: Maximum buffer size supported by this channel
- * for packet data types (Async,Control,QoS)
- * @num_buffers_streaming: Maximum number of buffers supported by this channel
- * for streaming data types (Sync,AV Packetized)
- * @buffer_size_streaming: Maximum buffer size supported by this channel
- * for streaming data types (Sync,AV Packetized)
- * @name_suffix: Optional suffix provided by an HDM that is attached to the
- * regular channel name.
- *
- * Describes the capabilities of a MostCore channel like supported Data Types
- * and directions. This information is provided by an HDM for the MostCore.
- *
- * The Core creates read only sysfs attribute files in
- * /sys/devices/virtual/most/mostcore/devices/mdev-#/mdev#-ch#/ with the
- * following attributes:
- *	-available_directions
- *	-available_datatypes
- *	-number_of_packet_buffers
- *	-number_of_stream_buffers
- *	-size_of_packet_buffer
- *	-size_of_stream_buffer
- * where content of each file is a string with all supported properties of this
- * very channel attribute.
- */
-struct most_channel_capability {
-	u16 direction;
-	u16 data_type;
-	u16 num_buffers_packet;
-	u16 buffer_size_packet;
-	u16 num_buffers_streaming;
-	u16 buffer_size_streaming;
-	const char *name_suffix;
-};
-
-/**
- * struct most_channel_config - stores channel configuration
- * @direction: direction of the channel
- * @data_type: data type travelling over this channel
- * @num_buffers: number of buffers
- * @buffer_size: size of a buffer for AIM.
- * Buffer size may be cut down by the HDM in a configure callback
- * to match a given interface and channel type.
- * @extra_len: additional buffer space for internal HDM purposes like padding.
- * May be set by HDM in a configure callback if needed.
- * @subbuffer_size: size of a subbuffer
- * @packets_per_xact: number of MOST frames that are packed inside one USB
- *		      packet. This is USB-specific.
- *
- * Describes the configuration for a MostCore channel. This information is
- * provided from the MostCore to a HDM (like the Medusa PCIe Interface) as a
- * parameter of the "configure" function call.
- */
-struct most_channel_config {
-	enum most_channel_direction direction;
-	enum most_channel_data_type data_type;
-	u16 num_buffers;
-	u16 buffer_size;
-	u16 extra_len;
-	u16 subbuffer_size;
-	u16 packets_per_xact;
-};
-
-/*
- * struct mbo - MOST Buffer Object.
- * @context: context for core completion handler
- * @priv: private data for HDM
- *
- *	public: documented fields that are used for the communications
- *	between MostCore and HDMs
- *
- * @list: list head for use by the mbo's current owner
- * @ifp: (in) associated interface instance
- * @hdm_channel_id: (in) HDM channel instance
- * @virt_address: (in) kernel virtual address of the buffer
- * @bus_address: (in) bus address of the buffer
- * @buffer_length: (in) buffer payload length
- * @processed_length: (out) processed length
- * @status: (out) transfer status
- * @complete: (in) completion routine
- *
- * The MostCore allocates and initializes the MBO.
- *
- * The HDM receives MBO for transfer from MostCore with the call to enqueue().
- * The HDM copies the data to- or from the buffer depending on configured
- * channel direction, set "processed_length" and "status" and completes
- * the transfer procedure by calling the completion routine.
- *
- * At the end the MostCore deallocates the MBO or recycles it for further
- * transfers for the same or different HDM.
- *
- * Directions of usage:
- * The core driver should never access any MBO fields (even if marked
- * as "public") while the MBO is owned by an HDM. The ownership starts with
- * the call of enqueue() and ends with the call of its complete() routine.
- *
- *					II.
- * Every HDM attached to the core driver _must_ ensure that it returns any MBO
- * it owns (due to a previous call to enqueue() by the core driver) before it
- * de-registers an interface or gets unloaded from the kernel. If this direction
- * is violated memory leaks will occur, since the core driver does _not_ track
- * MBOs it is currently not in control of.
- *
- */
-struct mbo {
-	void *context;
-	void *priv;
-	struct list_head list;
-	struct most_interface *ifp;
-	int *num_buffers_ptr;
-	u16 hdm_channel_id;
-	void *virt_address;
-	dma_addr_t bus_address;
-	u16 buffer_length;
-	u16 processed_length;
-	enum mbo_status_flags status;
-	void (*complete)(struct mbo *);
-};
-
-/**
- * Interface instance description.
- *
- * Describes one instance of an interface like Medusa PCIe or Vantage USB.
- * This structure is allocated and initialized in the HDM. MostCore may not
- * modify this structure.
- *
- * @interface Interface type. \sa most_interface_type.
- * @description PRELIMINARY.
- *   Unique description of the device instance from point of view of the
- *   interface in free text form (ASCII).
- *   It may be a hexadecimal presentation of the memory address for the MediaLB
- *   IP or USB device ID with USB properties for USB interface, etc.
- * @num_channels Number of channels and size of the channel_vector.
- * @channel_vector Properties of the channels.
- *   Array index represents channel ID by the driver.
- * @configure Callback to change data type for the channel of the
- *   interface instance. May be zero if the instance of the interface is not
- *   configurable. Parameter channel_config describes direction and data
- *   type for the channel, configured by the higher level. The content of
- * @enqueue Delivers MBO to the HDM for processing.
- *   After HDM completes Rx- or Tx- operation the processed MBO shall
- *   be returned back to the MostCore using completion routine.
- *   The reason to get the MBO delivered from the MostCore after the channel
- *   is poisoned is the re-opening of the channel by the application.
- *   In this case the HDM shall hold MBOs and service the channel as usual.
- *   The HDM must be able to hold at least one MBO for each channel.
- *   The callback returns a negative value on error, otherwise 0.
- * @poison_channel Informs HDM about closing the channel. The HDM shall
- *   cancel all transfers and synchronously or asynchronously return
- *   all enqueued for this channel MBOs using the completion routine.
- *   The callback returns a negative value on error, otherwise 0.
- * @request_netinfo: triggers retrieving of network info from the HDM by
- *   means of "Message exchange over MDP/MEP"
- *   The call of the function request_netinfo with the parameter on_netinfo as
- *   NULL prohibits use of the previously obtained function pointer.
- * @priv Private field used by mostcore to store context information.
- */
-struct most_interface {
-	struct module *mod;
-	enum most_interface_type interface;
-	const char *description;
-	int num_channels;
-	struct most_channel_capability *channel_vector;
-	int (*configure)(struct most_interface *iface, int channel_idx,
-			 struct most_channel_config *channel_config);
-	int (*enqueue)(struct most_interface *iface, int channel_idx,
-		       struct mbo *mbo);
-	int (*poison_channel)(struct most_interface *iface, int channel_idx);
-	void (*request_netinfo)(struct most_interface *iface, int channel_idx,
-				void (*on_netinfo)(struct most_interface *iface,
-						   unsigned char link_stat,
-						   unsigned char *mac_addr));
-	void *priv;
-};
-
-/**
- * struct most_aim - identifies MOST device driver to mostcore
- * @name: Driver name
- * @probe_channel: function for core to notify driver about channel connection
- * @disconnect_channel: callback function to disconnect a certain channel
- * @rx_completion: completion handler for received packets
- * @tx_completion: completion handler for transmitted packets
- * @context: context pointer to be used by mostcore
- */
-struct most_aim {
-	const char *name;
-	int (*probe_channel)(struct most_interface *iface, int channel_idx,
-			     struct most_channel_config *cfg,
-			     struct kobject *parent, char *name);
-	int (*disconnect_channel)(struct most_interface *iface,
-				  int channel_idx);
-	int (*rx_completion)(struct mbo *mbo);
-	int (*tx_completion)(struct most_interface *iface, int channel_idx);
-	void *context;
-};
-
-/**
- * most_register_interface - Registers instance of the interface.
- * @iface: Pointer to the interface instance description.
- *
- * Returns a pointer to the kobject of the generated instance.
- *
- * Note: HDM has to ensure that any reference held on the kobj is
- * released before deregistering the interface.
- */
-struct kobject *most_register_interface(struct most_interface *iface);
-
-/**
- * Deregisters instance of the interface.
- * @intf_instance Pointer to the interface instance description.
- */
-void most_deregister_interface(struct most_interface *iface);
-void most_submit_mbo(struct mbo *mbo);
-
-/**
- * most_stop_enqueue - prevents core from enqueing MBOs
- * @iface: pointer to interface
- * @channel_idx: channel index
- */
-void most_stop_enqueue(struct most_interface *iface, int channel_idx);
-
-/**
- * most_resume_enqueue - allow core to enqueue MBOs again
- * @iface: pointer to interface
- * @channel_idx: channel index
- *
- * This clears the enqueue halt flag and enqueues all MBOs currently
- * in wait fifo.
- */
-void most_resume_enqueue(struct most_interface *iface, int channel_idx);
-int most_register_aim(struct most_aim *aim);
-int most_deregister_aim(struct most_aim *aim);
-struct mbo *most_get_mbo(struct most_interface *iface, int channel_idx,
-			 struct most_aim *);
-void most_put_mbo(struct mbo *mbo);
-int channel_has_mbo(struct most_interface *iface, int channel_idx,
-		    struct most_aim *aim);
-int most_start_channel(struct most_interface *iface, int channel_idx,
-		       struct most_aim *);
-int most_stop_channel(struct most_interface *iface, int channel_idx,
-		      struct most_aim *);
-
-#endif /* MOST_CORE_H_ */
diff --git a/drivers/staging/most/net/Kconfig b/drivers/staging/most/net/Kconfig
new file mode 100644
index 0000000..795330b
--- /dev/null
+++ b/drivers/staging/most/net/Kconfig
@@ -0,0 +1,13 @@
+#
+# MOST Networking configuration
+#
+
+config MOST_NET
+	tristate "Net"
+	depends on NET
+
+	---help---
+	  Say Y here if you want to communicate via a networking device.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called most_net.
diff --git a/drivers/staging/most/net/Makefile b/drivers/staging/most/net/Makefile
new file mode 100644
index 0000000..54500aa
--- /dev/null
+++ b/drivers/staging/most/net/Makefile
@@ -0,0 +1,4 @@
+obj-$(CONFIG_MOST_NET) += most_net.o
+
+most_net-objs := net.o
+ccflags-y += -Idrivers/staging/
diff --git a/drivers/staging/most/net/net.c b/drivers/staging/most/net/net.c
new file mode 100644
index 0000000..d43aad3
--- /dev/null
+++ b/drivers/staging/most/net/net.c
@@ -0,0 +1,567 @@
+/*
+ * Networking AIM - Networking Application Interface Module for MostCore
+ *
+ * Copyright (C) 2015, Microchip Technology Germany II GmbH & Co. KG
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * This file is licensed under GPLv2.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/list.h>
+#include <linux/wait.h>
+#include <linux/kobject.h>
+#include <most/core.h>
+
+#define MEP_HDR_LEN 8
+#define MDP_HDR_LEN 16
+#define MAMAC_DATA_LEN (1024 - MDP_HDR_LEN)
+
+#define PMHL 5
+
+#define PMS_TELID_UNSEGM_MAMAC	0x0A
+#define PMS_FIFONO_MDP		0x01
+#define PMS_FIFONO_MEP		0x04
+#define PMS_MSGTYPE_DATA	0x04
+#define PMS_DEF_PRIO		0
+#define MEP_DEF_RETRY		15
+
+#define PMS_FIFONO_MASK		0x07
+#define PMS_FIFONO_SHIFT	3
+#define PMS_RETRY_SHIFT		4
+#define PMS_TELID_MASK		0x0F
+#define PMS_TELID_SHIFT		4
+
+#define HB(value)		((u8)((u16)(value) >> 8))
+#define LB(value)		((u8)(value))
+
+#define EXTRACT_BIT_SET(bitset_name, value) \
+	(((value) >> bitset_name##_SHIFT) & bitset_name##_MASK)
+
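+/*
+ * Classify a received buffer by its PMS header: the FIFO number in byte 3
+ * distinguishes MEP frames from MDP frames, and MDP frames additionally
+ * carry the "unsegmented MAMAC" telegram ID in byte 14.
+ */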
+#define PMS_IS_MEP(buf, len) \
+	((len) > MEP_HDR_LEN && \
+	 EXTRACT_BIT_SET(PMS_FIFONO, (buf)[3]) == PMS_FIFONO_MEP)
+
+#define PMS_IS_MAMAC(buf, len) \
+	((len) > MDP_HDR_LEN && \
+	 EXTRACT_BIT_SET(PMS_FIFONO, (buf)[3]) == PMS_FIFONO_MDP && \
+	 EXTRACT_BIT_SET(PMS_TELID, (buf)[14]) == PMS_TELID_UNSEGM_MAMAC)
+
+struct net_dev_channel {
+	bool linked;
+	int ch_id;
+};
+
+struct net_dev_context {
+	struct most_interface *iface;
+	bool is_mamac;
+	struct net_device *dev;
+	struct net_dev_channel rx;
+	struct net_dev_channel tx;
+	struct list_head list;
+};
+
+static struct list_head net_devices = LIST_HEAD_INIT(net_devices);
+static struct mutex probe_disc_mt; /* ch->linked = true, most_nd_open */
+static struct spinlock list_lock; /* list_head, ch->linked = false, dev_hold */
+static struct most_aim aim;
+
+static int skb_to_mamac(const struct sk_buff *skb, struct mbo *mbo)
+{
+	u8 *buff = mbo->virt_address;
+	const u8 broadcast[] = { 0x03, 0xFF };
+	const u8 *dest_addr = skb->data + 4;
+	const u8 *eth_type = skb->data + 12;
+	unsigned int payload_len = skb->len - ETH_HLEN;
+	unsigned int mdp_len = payload_len + MDP_HDR_LEN;
+
+	if (mbo->buffer_length < mdp_len) {
+		pr_err("drop: too small buffer! (%d for %d)\n",
+		       mbo->buffer_length, mdp_len);
+		return -EINVAL;
+	}
+
+	if (skb->len < ETH_HLEN) {
+		pr_err("drop: too small packet! (%d)\n", skb->len);
+		return -EINVAL;
+	}
+
+	if (dest_addr[0] == 0xFF && dest_addr[1] == 0xFF)
+		dest_addr = broadcast;
+
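+	/* build the 16-byte MDP header in front of the MAMAC payload */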
+	*buff++ = HB(mdp_len - 2);
+	*buff++ = LB(mdp_len - 2);
+
+	*buff++ = PMHL;
+	*buff++ = (PMS_FIFONO_MDP << PMS_FIFONO_SHIFT) | PMS_MSGTYPE_DATA;
+	*buff++ = PMS_DEF_PRIO;
+	*buff++ = dest_addr[0];
+	*buff++ = dest_addr[1];
+	*buff++ = 0x00;
+
+	*buff++ = HB(payload_len + 6);
+	*buff++ = LB(payload_len + 6);
+
+	/* end of FPH here */
+
+	*buff++ = eth_type[0];
+	*buff++ = eth_type[1];
+	*buff++ = 0;
+	*buff++ = 0;
+
+	*buff++ = PMS_TELID_UNSEGM_MAMAC << 4 | HB(payload_len);
+	*buff++ = LB(payload_len);
+
+	memcpy(buff, skb->data + ETH_HLEN, payload_len);
+	mbo->buffer_length = mdp_len;
+	return 0;
+}
+
+static int skb_to_mep(const struct sk_buff *skb, struct mbo *mbo)
+{
+	u8 *buff = mbo->virt_address;
+	unsigned int mep_len = skb->len + MEP_HDR_LEN;
+
+	if (mbo->buffer_length < mep_len) {
+		pr_err("drop: too small buffer! (%d for %d)\n",
+		       mbo->buffer_length, mep_len);
+		return -EINVAL;
+	}
+
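+	/* build the 8-byte MEP header in front of the Ethernet frame */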
+	*buff++ = HB(mep_len - 2);
+	*buff++ = LB(mep_len - 2);
+
+	*buff++ = PMHL;
+	*buff++ = (PMS_FIFONO_MEP << PMS_FIFONO_SHIFT) | PMS_MSGTYPE_DATA;
+	*buff++ = (MEP_DEF_RETRY << PMS_RETRY_SHIFT) | PMS_DEF_PRIO;
+	*buff++ = 0;
+	*buff++ = 0;
+	*buff++ = 0;
+
+	memcpy(buff, skb->data, skb->len);
+	mbo->buffer_length = mep_len;
+	return 0;
+}
+
+static int most_nd_set_mac_address(struct net_device *dev, void *p)
+{
+	struct net_dev_context *nd = netdev_priv(dev);
+	int err = eth_mac_addr(dev, p);
+
+	if (err)
+		return err;
+
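+	/* an address whose first four bytes are zero selects MAMAC framing */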
+	nd->is_mamac =
+		(dev->dev_addr[0] == 0 && dev->dev_addr[1] == 0 &&
+		 dev->dev_addr[2] == 0 && dev->dev_addr[3] == 0);
+
+	/*
+	 * Set default MTU for the given packet type.
+	 * It is still possible to change MTU using ip tools afterwards.
+	 */
+	dev->mtu = nd->is_mamac ? MAMAC_DATA_LEN : ETH_DATA_LEN;
+
+	return 0;
+}
+
+static void on_netinfo(struct most_interface *iface,
+		       unsigned char link_stat, unsigned char *mac_addr);
+
+static int most_nd_open(struct net_device *dev)
+{
+	struct net_dev_context *nd = netdev_priv(dev);
+	int ret = 0;
+
+	mutex_lock(&probe_disc_mt);
+
+	if (most_start_channel(nd->iface, nd->rx.ch_id, &aim)) {
+		netdev_err(dev, "most_start_channel() failed\n");
+		ret = -EBUSY;
+		goto unlock;
+	}
+
+	if (most_start_channel(nd->iface, nd->tx.ch_id, &aim)) {
+		netdev_err(dev, "most_start_channel() failed\n");
+		most_stop_channel(nd->iface, nd->rx.ch_id, &aim);
+		ret = -EBUSY;
+		goto unlock;
+	}
+
+	netif_carrier_off(dev);
+	if (is_valid_ether_addr(dev->dev_addr))
+		netif_dormant_off(dev);
+	else
+		netif_dormant_on(dev);
+	netif_wake_queue(dev);
+	if (nd->iface->request_netinfo)
+		nd->iface->request_netinfo(nd->iface, nd->tx.ch_id, on_netinfo);
+
+unlock:
+	mutex_unlock(&probe_disc_mt);
+	return ret;
+}
+
+static int most_nd_stop(struct net_device *dev)
+{
+	struct net_dev_context *nd = netdev_priv(dev);
+
+	netif_stop_queue(dev);
+	if (nd->iface->request_netinfo)
+		nd->iface->request_netinfo(nd->iface, nd->tx.ch_id, NULL);
+	most_stop_channel(nd->iface, nd->rx.ch_id, &aim);
+	most_stop_channel(nd->iface, nd->tx.ch_id, &aim);
+
+	return 0;
+}
+
+static netdev_tx_t most_nd_start_xmit(struct sk_buff *skb,
+				      struct net_device *dev)
+{
+	struct net_dev_context *nd = netdev_priv(dev);
+	struct mbo *mbo;
+	int ret;
+
+	mbo = most_get_mbo(nd->iface, nd->tx.ch_id, &aim);
+
+	if (!mbo) {
+		netif_stop_queue(dev);
+		dev->stats.tx_fifo_errors++;
+		return NETDEV_TX_BUSY;
+	}
+
+	if (nd->is_mamac)
+		ret = skb_to_mamac(skb, mbo);
+	else
+		ret = skb_to_mep(skb, mbo);
+
+	if (ret) {
+		most_put_mbo(mbo);
+		dev->stats.tx_dropped++;
+		kfree_skb(skb);
+		return NETDEV_TX_OK;
+	}
+
+	most_submit_mbo(mbo);
+	dev->stats.tx_packets++;
+	dev->stats.tx_bytes += skb->len;
+	kfree_skb(skb);
+	return NETDEV_TX_OK;
+}
+
+static const struct net_device_ops most_nd_ops = {
+	.ndo_open = most_nd_open,
+	.ndo_stop = most_nd_stop,
+	.ndo_start_xmit = most_nd_start_xmit,
+	.ndo_set_mac_address = most_nd_set_mac_address,
+};
+
+static void most_nd_setup(struct net_device *dev)
+{
+	ether_setup(dev);
+	dev->netdev_ops = &most_nd_ops;
+}
+
+static struct net_dev_context *get_net_dev(struct most_interface *iface)
+{
+	struct net_dev_context *nd;
+
+	list_for_each_entry(nd, &net_devices, list)
+		if (nd->iface == iface)
+			return nd;
+	return NULL;
+}
+
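+/*
+ * Like get_net_dev(), but takes a reference on the net_device as long as
+ * both channels are linked; callers must drop it again with dev_put().
+ */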
+static struct net_dev_context *get_net_dev_hold(struct most_interface *iface)
+{
+	struct net_dev_context *nd;
+	unsigned long flags;
+
+	spin_lock_irqsave(&list_lock, flags);
+	nd = get_net_dev(iface);
+	if (nd && nd->rx.linked && nd->tx.linked)
+		dev_hold(nd->dev);
+	else
+		nd = NULL;
+	spin_unlock_irqrestore(&list_lock, flags);
+	return nd;
+}
+
+static int aim_probe_channel(struct most_interface *iface, int channel_idx,
+			     struct most_channel_config *ccfg,
+			     struct kobject *parent, char *name)
+{
+	struct net_dev_context *nd;
+	struct net_dev_channel *ch;
+	struct net_device *dev;
+	unsigned long flags;
+	int ret = 0;
+
+	if (!iface)
+		return -EINVAL;
+
+	if (ccfg->data_type != MOST_CH_ASYNC)
+		return -EINVAL;
+
+	mutex_lock(&probe_disc_mt);
+	nd = get_net_dev(iface);
+	if (!nd) {
+		dev = alloc_netdev(sizeof(struct net_dev_context), "meth%d",
+				   NET_NAME_UNKNOWN, most_nd_setup);
+		if (!dev) {
+			ret = -ENOMEM;
+			goto unlock;
+		}
+
+		nd = netdev_priv(dev);
+		nd->iface = iface;
+		nd->dev = dev;
+
+		spin_lock_irqsave(&list_lock, flags);
+		list_add(&nd->list, &net_devices);
+		spin_unlock_irqrestore(&list_lock, flags);
+
+		ch = ccfg->direction == MOST_CH_TX ? &nd->tx : &nd->rx;
+	} else {
+		ch = ccfg->direction == MOST_CH_TX ? &nd->tx : &nd->rx;
+		if (ch->linked) {
+			pr_err("direction is allocated\n");
+			ret = -EINVAL;
+			goto unlock;
+		}
+
+		if (register_netdev(nd->dev)) {
+			pr_err("register_netdev() failed\n");
+			ret = -EINVAL;
+			goto unlock;
+		}
+	}
+	ch->ch_id = channel_idx;
+	ch->linked = true;
+
+unlock:
+	mutex_unlock(&probe_disc_mt);
+	return ret;
+}
+
+static int aim_disconnect_channel(struct most_interface *iface,
+				  int channel_idx)
+{
+	struct net_dev_context *nd;
+	struct net_dev_channel *ch;
+	unsigned long flags;
+	int ret = 0;
+
+	mutex_lock(&probe_disc_mt);
+	nd = get_net_dev(iface);
+	if (!nd) {
+		ret = -EINVAL;
+		goto unlock;
+	}
+
+	if (nd->rx.linked && channel_idx == nd->rx.ch_id) {
+		ch = &nd->rx;
+	} else if (nd->tx.linked && channel_idx == nd->tx.ch_id) {
+		ch = &nd->tx;
+	} else {
+		ret = -EINVAL;
+		goto unlock;
+	}
+
+	if (nd->rx.linked && nd->tx.linked) {
+		spin_lock_irqsave(&list_lock, flags);
+		ch->linked = false;
+		spin_unlock_irqrestore(&list_lock, flags);
+
+		/*
+		 * do not call most_stop_channel() here, because channels are
+		 * going to be closed in ndo_stop() after unregister_netdev()
+		 */
+		unregister_netdev(nd->dev);
+	} else {
+		spin_lock_irqsave(&list_lock, flags);
+		list_del(&nd->list);
+		spin_unlock_irqrestore(&list_lock, flags);
+
+		free_netdev(nd->dev);
+	}
+
+unlock:
+	mutex_unlock(&probe_disc_mt);
+	return ret;
+}
+
+static int aim_resume_tx_channel(struct most_interface *iface,
+				 int channel_idx)
+{
+	struct net_dev_context *nd;
+
+	nd = get_net_dev_hold(iface);
+	if (!nd)
+		return 0;
+
+	if (nd->tx.ch_id != channel_idx)
+		goto put_nd;
+
+	netif_wake_queue(nd->dev);
+
+put_nd:
+	dev_put(nd->dev);
+	return 0;
+}
+
+static int aim_rx_data(struct mbo *mbo)
+{
+	const u32 zero = 0;
+	struct net_dev_context *nd;
+	char *buf = mbo->virt_address;
+	u32 len = mbo->processed_length;
+	struct sk_buff *skb;
+	struct net_device *dev;
+	unsigned int skb_len;
+	int ret = 0;
+
+	nd = get_net_dev_hold(mbo->ifp);
+	if (!nd)
+		return -EIO;
+
+	if (nd->rx.ch_id != mbo->hdm_channel_id) {
+		ret = -EIO;
+		goto put_nd;
+	}
+
+	dev = nd->dev;
+
+	if (nd->is_mamac) {
+		if (!PMS_IS_MAMAC(buf, len)) {
+			ret = -EIO;
+			goto put_nd;
+		}
+
+		skb = dev_alloc_skb(len - MDP_HDR_LEN + 2 * ETH_ALEN + 2);
+	} else {
+		if (!PMS_IS_MEP(buf, len)) {
+			ret = -EIO;
+			goto put_nd;
+		}
+
+		skb = dev_alloc_skb(len - MEP_HDR_LEN);
+	}
+
+	if (!skb) {
+		dev->stats.rx_dropped++;
+		pr_err_once("drop packet: no memory for skb\n");
+		goto out;
+	}
+
+	skb->dev = dev;
+
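+	/* MDP frames have no Ethernet header; rebuild one from the MDP fields */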
+	if (nd->is_mamac) {
+		/* dest */
+		ether_addr_copy(skb_put(skb, ETH_ALEN), dev->dev_addr);
+
+		/* src */
+		skb_put_data(skb, &zero, 4);
+		skb_put_data(skb, buf + 5, 2);
+
+		/* eth type */
+		skb_put_data(skb, buf + 10, 2);
+
+		buf += MDP_HDR_LEN;
+		len -= MDP_HDR_LEN;
+	} else {
+		buf += MEP_HDR_LEN;
+		len -= MEP_HDR_LEN;
+	}
+
+	skb_put_data(skb, buf, len);
+	skb->protocol = eth_type_trans(skb, dev);
+	skb_len = skb->len;
+	if (netif_rx(skb) == NET_RX_SUCCESS) {
+		dev->stats.rx_packets++;
+		dev->stats.rx_bytes += skb_len;
+	} else {
+		dev->stats.rx_dropped++;
+	}
+
+out:
+	most_put_mbo(mbo);
+
+put_nd:
+	dev_put(nd->dev);
+	return ret;
+}
+
+static struct most_aim aim = {
+	.name = "networking",
+	.probe_channel = aim_probe_channel,
+	.disconnect_channel = aim_disconnect_channel,
+	.tx_completion = aim_resume_tx_channel,
+	.rx_completion = aim_rx_data,
+};
+
+static int __init most_net_init(void)
+{
+	spin_lock_init(&list_lock);
+	mutex_init(&probe_disc_mt);
+	return most_register_aim(&aim);
+}
+
+static void __exit most_net_exit(void)
+{
+	most_deregister_aim(&aim);
+}
+
+/**
+ * on_netinfo - callback for HDM to be informed about HW's MAC
+ * @iface: most interface instance
+ * @link_stat: link status
+ * @mac_addr: MAC address
+ */
+static void on_netinfo(struct most_interface *iface,
+		       unsigned char link_stat, unsigned char *mac_addr)
+{
+	struct net_dev_context *nd;
+	struct net_device *dev;
+	const u8 *m = mac_addr;
+
+	nd = get_net_dev_hold(iface);
+	if (!nd)
+		return;
+
+	dev = nd->dev;
+
+	if (link_stat)
+		netif_carrier_on(dev);
+	else
+		netif_carrier_off(dev);
+
+	if (m && is_valid_ether_addr(m)) {
+		if (!is_valid_ether_addr(dev->dev_addr)) {
+			netdev_info(dev, "set mac %02x-%02x-%02x-%02x-%02x-%02x\n",
+				    m[0], m[1], m[2], m[3], m[4], m[5]);
+			ether_addr_copy(dev->dev_addr, m);
+			netif_dormant_off(dev);
+		} else if (!ether_addr_equal(dev->dev_addr, m)) {
+			netdev_warn(dev, "reject mac %02x-%02x-%02x-%02x-%02x-%02x\n",
+				    m[0], m[1], m[2], m[3], m[4], m[5]);
+		}
+	}
+
+	dev_put(nd->dev);
+}
+
+module_init(most_net_init);
+module_exit(most_net_exit);
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Andrey Shvetsov <andrey.shvetsov@xxxxxx>");
+MODULE_DESCRIPTION("Networking Application Interface Module for MostCore");
diff --git a/drivers/staging/most/sound/Kconfig b/drivers/staging/most/sound/Kconfig
new file mode 100644
index 0000000..115262a
--- /dev/null
+++ b/drivers/staging/most/sound/Kconfig
@@ -0,0 +1,13 @@
+#
+# MOST ALSA configuration
+#
+
+config MOST_SOUND
+	tristate "Sound"
+	depends on SND
+	select SND_PCM
+	---help---
+	  Say Y here if you want to communicate via ALSA/sound devices.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called most_sound.
diff --git a/drivers/staging/most/sound/Makefile b/drivers/staging/most/sound/Makefile
new file mode 100644
index 0000000..eee8774
--- /dev/null
+++ b/drivers/staging/most/sound/Makefile
@@ -0,0 +1,4 @@
+obj-$(CONFIG_MOST_SOUND) += most_sound.o
+
+most_sound-objs := sound.o
+ccflags-y += -Idrivers/staging/
diff --git a/drivers/staging/most/sound/sound.c b/drivers/staging/most/sound/sound.c
new file mode 100644
index 0000000..72603ae
--- /dev/null
+++ b/drivers/staging/most/sound/sound.c
@@ -0,0 +1,765 @@
+/*
+ * sound.c - Audio Application Interface Module for Mostcore
+ *
+ * Copyright (C) 2015 Microchip Technology Germany II GmbH & Co. KG
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * This file is licensed under GPLv2.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/module.h>
+#include <linux/printk.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <sound/core.h>
+#include <sound/pcm.h>
+#include <sound/pcm_params.h>
+#include <linux/sched.h>
+#include <linux/kthread.h>
+#include <most/core.h>
+
+#define DRIVER_NAME "sound"
+
+static struct list_head dev_list;
+static struct most_aim audio_aim;
+
+/**
+ * struct channel - private structure to keep channel specific data
+ * @substream: stores the substream structure
+ * @pcm_hardware: stores the hardware description of the PCM
+ * @iface: interface the channel belongs to
+ * @cfg: channel configuration
+ * @card: registered sound card
+ * @list: list for private use
+ * @id: channel index
+ * @period_pos: current period position (ring buffer)
+ * @buffer_pos: current buffer position (ring buffer)
+ * @is_stream_running: identifies whether a stream is running or not
+ * @playback_task: playback thread
+ * @playback_waitq: waitq used by playback thread
+ * @copy_fn: copy function matching the configured sample format
+ */
+struct channel {
+	struct snd_pcm_substream *substream;
+	struct snd_pcm_hardware pcm_hardware;
+	struct most_interface *iface;
+	struct most_channel_config *cfg;
+	struct snd_card *card;
+	struct list_head list;
+	int id;
+	unsigned int period_pos;
+	unsigned int buffer_pos;
+	bool is_stream_running;
+
+	struct task_struct *playback_task;
+	wait_queue_head_t playback_waitq;
+
+	void (*copy_fn)(void *alsa, void *most, unsigned int bytes);
+};
+
+#define MOST_PCM_INFO (SNDRV_PCM_INFO_MMAP | \
+		       SNDRV_PCM_INFO_MMAP_VALID | \
+		       SNDRV_PCM_INFO_BATCH | \
+		       SNDRV_PCM_INFO_INTERLEAVED | \
+		       SNDRV_PCM_INFO_BLOCK_TRANSFER)
+
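+/*
+ * Byte-swapping helpers for 16-, 24- and 32-bit samples. pcm_prepare()
+ * selects them whenever the ALSA sample format is little-endian; big-endian
+ * and 8-bit formats are copied verbatim.
+ */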
+#define swap16(val) ( \
+	(((u16)(val) << 8) & (u16)0xFF00) | \
+	(((u16)(val) >> 8) & (u16)0x00FF))
+
+#define swap32(val) ( \
+	(((u32)(val) << 24) & (u32)0xFF000000) | \
+	(((u32)(val) <<  8) & (u32)0x00FF0000) | \
+	(((u32)(val) >>  8) & (u32)0x0000FF00) | \
+	(((u32)(val) >> 24) & (u32)0x000000FF))
+
+static void swap_copy16(u16 *dest, const u16 *source, unsigned int bytes)
+{
+	unsigned int i = 0;
+
+	while (i < (bytes / 2)) {
+		dest[i] = swap16(source[i]);
+		i++;
+	}
+}
+
+static void swap_copy24(u8 *dest, const u8 *source, unsigned int bytes)
+{
+	unsigned int i = 0;
+
+	while (i < bytes - 2) {
+		dest[i] = source[i + 2];
+		dest[i + 1] = source[i + 1];
+		dest[i + 2] = source[i];
+		i += 3;
+	}
+}
+
+static void swap_copy32(u32 *dest, const u32 *source, unsigned int bytes)
+{
+	unsigned int i = 0;
+
+	while (i < bytes / 4) {
+		dest[i] = swap32(source[i]);
+		i++;
+	}
+}
+
+static void alsa_to_most_memcpy(void *alsa, void *most, unsigned int bytes)
+{
+	memcpy(most, alsa, bytes);
+}
+
+static void alsa_to_most_copy16(void *alsa, void *most, unsigned int bytes)
+{
+	swap_copy16(most, alsa, bytes);
+}
+
+static void alsa_to_most_copy24(void *alsa, void *most, unsigned int bytes)
+{
+	swap_copy24(most, alsa, bytes);
+}
+
+static void alsa_to_most_copy32(void *alsa, void *most, unsigned int bytes)
+{
+	swap_copy32(most, alsa, bytes);
+}
+
+static void most_to_alsa_memcpy(void *alsa, void *most, unsigned int bytes)
+{
+	memcpy(alsa, most, bytes);
+}
+
+static void most_to_alsa_copy16(void *alsa, void *most, unsigned int bytes)
+{
+	swap_copy16(alsa, most, bytes);
+}
+
+static void most_to_alsa_copy24(void *alsa, void *most, unsigned int bytes)
+{
+	swap_copy24(alsa, most, bytes);
+}
+
+static void most_to_alsa_copy32(void *alsa, void *most, unsigned int bytes)
+{
+	swap_copy32(alsa, most, bytes);
+}
+
+/**
+ * get_channel - get pointer to channel
+ * @iface: interface structure
+ * @channel_id: channel ID
+ *
+ * This traverses the channel list and returns the channel matching the
+ * ID and interface.
+ *
+ * Returns pointer to channel on success or NULL otherwise.
+ */
+static struct channel *get_channel(struct most_interface *iface,
+				   int channel_id)
+{
+	struct channel *channel, *tmp;
+
+	list_for_each_entry_safe(channel, tmp, &dev_list, list) {
+		if ((channel->iface == iface) && (channel->id == channel_id))
+			return channel;
+	}
+
+	return NULL;
+}
+
+/**
+ * copy_data - implements data copying function
+ * @channel: channel
+ * @mbo: MBO from core
+ *
+ * Copy data from/to ring buffer to/from MBO and update the buffer position
+ */
+static bool copy_data(struct channel *channel, struct mbo *mbo)
+{
+	struct snd_pcm_runtime *const runtime = channel->substream->runtime;
+	unsigned int const frame_bytes = channel->cfg->subbuffer_size;
+	unsigned int const buffer_size = runtime->buffer_size;
+	unsigned int frames;
+	unsigned int fr0;
+
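+	/* number of audio frames to copy for this MBO; fr0 is the chunk that
+	 * fits before the ring buffer wraps around
+	 */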
+	if (channel->cfg->direction & MOST_CH_RX)
+		frames = mbo->processed_length / frame_bytes;
+	else
+		frames = mbo->buffer_length / frame_bytes;
+	fr0 = min(buffer_size - channel->buffer_pos, frames);
+
+	channel->copy_fn(runtime->dma_area + channel->buffer_pos * frame_bytes,
+			 mbo->virt_address,
+			 fr0 * frame_bytes);
+
+	if (frames > fr0) {
+		/* wrap around at end of ring buffer */
+		channel->copy_fn(runtime->dma_area,
+				 mbo->virt_address + fr0 * frame_bytes,
+				 (frames - fr0) * frame_bytes);
+	}
+
+	channel->buffer_pos += frames;
+	if (channel->buffer_pos >= buffer_size)
+		channel->buffer_pos -= buffer_size;
+	channel->period_pos += frames;
+	if (channel->period_pos >= runtime->period_size) {
+		channel->period_pos -= runtime->period_size;
+		return true;
+	}
+
+	return false;
+}
+
+/**
+ * playback_thread - function implements the playback thread
+ * @data: private data
+ *
+ * Thread that implements the playback functionality in a loop. It waits for a
+ * free MBO from mostcore for a particular channel, copies the data from the
+ * ring buffer to the MBO and submits the MBO back to mostcore.
+ *
+ * Returns 0 on success or error code otherwise.
+ */
+static int playback_thread(void *data)
+{
+	struct channel *const channel = data;
+
+	while (!kthread_should_stop()) {
+		struct mbo *mbo = NULL;
+		bool period_elapsed = false;
+
+		wait_event_interruptible(
+			channel->playback_waitq,
+			kthread_should_stop() ||
+			(channel->is_stream_running &&
+			 (mbo = most_get_mbo(channel->iface, channel->id,
+					     &audio_aim))));
+		if (!mbo)
+			continue;
+
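+		/* send silence while the stream is not running */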
+		if (channel->is_stream_running)
+			period_elapsed = copy_data(channel, mbo);
+		else
+			memset(mbo->virt_address, 0, mbo->buffer_length);
+
+		most_submit_mbo(mbo);
+		if (period_elapsed)
+			snd_pcm_period_elapsed(channel->substream);
+	}
+
+	return 0;
+}
+
+/**
+ * pcm_open - implements open callback function for PCM middle layer
+ * @substream: pointer to ALSA PCM substream
+ *
+ * This is called when a PCM substream is opened. At least, the function should
+ * initialize the runtime->hw record.
+ *
+ * Returns 0 on success or error code otherwise.
+ */
+static int pcm_open(struct snd_pcm_substream *substream)
+{
+	struct channel *channel = substream->private_data;
+	struct snd_pcm_runtime *runtime = substream->runtime;
+	struct most_channel_config *cfg = channel->cfg;
+
+	channel->substream = substream;
+
+	if (cfg->direction == MOST_CH_TX) {
+		channel->playback_task = kthread_run(playback_thread, channel,
+						     "most_audio_playback");
+		if (IS_ERR(channel->playback_task)) {
+			pr_err("Couldn't start thread\n");
+			return PTR_ERR(channel->playback_task);
+		}
+	}
+
+	if (most_start_channel(channel->iface, channel->id, &audio_aim)) {
+		pr_err("most_start_channel() failed!\n");
+		if (cfg->direction == MOST_CH_TX)
+			kthread_stop(channel->playback_task);
+		return -EBUSY;
+	}
+
+	runtime->hw = channel->pcm_hardware;
+	return 0;
+}
+
+/**
+ * pcm_close - implements close callback function for PCM middle layer
+ * @substream: sub-stream pointer
+ *
+ * Obviously, this is called when a PCM substream is closed. Any private
+ * instance for a PCM substream allocated in the open callback will be
+ * released here.
+ *
+ * Returns 0 on success or error code otherwise.
+ */
+static int pcm_close(struct snd_pcm_substream *substream)
+{
+	struct channel *channel = substream->private_data;
+
+	if (channel->cfg->direction == MOST_CH_TX)
+		kthread_stop(channel->playback_task);
+	most_stop_channel(channel->iface, channel->id, &audio_aim);
+
+	return 0;
+}
+
+/**
+ * pcm_hw_params - implements hw_params callback function for PCM middle layer
+ * @substream: sub-stream pointer
+ * @hw_params: contains the hardware parameters set by the application
+ *
+ * This is called when the hardware parameters are set by the application, that
+ * is, once the buffer size, the period size, the format, etc. are defined
+ * for the PCM substream. Many hardware setups should be done in this callback,
+ * including the allocation of buffers.
+ *
+ * Returns 0 on success or error code otherwise.
+ */
+static int pcm_hw_params(struct snd_pcm_substream *substream,
+			 struct snd_pcm_hw_params *hw_params)
+{
+	struct channel *channel = substream->private_data;
+
+	if ((params_channels(hw_params) > channel->pcm_hardware.channels_max) ||
+	    (params_channels(hw_params) < channel->pcm_hardware.channels_min)) {
+		pr_err("Requested number of channels not supported.\n");
+		return -EINVAL;
+	}
+	return snd_pcm_lib_alloc_vmalloc_buffer(substream,
+						params_buffer_bytes(hw_params));
+}
+
+/**
+ * pcm_hw_free - implements hw_free callback function for PCM middle layer
+ * @substream: substream pointer
+ *
+ * This is called to release the resources allocated via hw_params.
+ * This function will be always called before the close callback is called.
+ *
+ * Returns 0 on success or error code otherwise.
+ */
+static int pcm_hw_free(struct snd_pcm_substream *substream)
+{
+	return snd_pcm_lib_free_vmalloc_buffer(substream);
+}
+
+/**
+ * pcm_prepare - implements prepare callback function for PCM middle layer
+ * @substream: substream pointer
+ *
+ * This callback is called when the PCM is "prepared". Format rate, sample rate,
+ * etc., can be set here. This callback can be called many times at each setup.
+ *
+ * Returns 0 on success or error code otherwise.
+ */
+static int pcm_prepare(struct snd_pcm_substream *substream)
+{
+	struct channel *channel = substream->private_data;
+	struct snd_pcm_runtime *runtime = substream->runtime;
+	struct most_channel_config *cfg = channel->cfg;
+	int width = snd_pcm_format_physical_width(runtime->format);
+
+	channel->copy_fn = NULL;
+
+	if (cfg->direction == MOST_CH_TX) {
+		if (snd_pcm_format_big_endian(runtime->format) || width == 8)
+			channel->copy_fn = alsa_to_most_memcpy;
+		else if (width == 16)
+			channel->copy_fn = alsa_to_most_copy16;
+		else if (width == 24)
+			channel->copy_fn = alsa_to_most_copy24;
+		else if (width == 32)
+			channel->copy_fn = alsa_to_most_copy32;
+	} else {
+		if (snd_pcm_format_big_endian(runtime->format) || width == 8)
+			channel->copy_fn = most_to_alsa_memcpy;
+		else if (width == 16)
+			channel->copy_fn = most_to_alsa_copy16;
+		else if (width == 24)
+			channel->copy_fn = most_to_alsa_copy24;
+		else if (width == 32)
+			channel->copy_fn = most_to_alsa_copy32;
+	}
+
+	if (!channel->copy_fn) {
+		pr_err("unsupported format\n");
+		return -EINVAL;
+	}
+
+	channel->period_pos = 0;
+	channel->buffer_pos = 0;
+
+	return 0;
+}
+
+/**
+ * pcm_trigger - implements trigger callback function for PCM middle layer
+ * @substream: substream pointer
+ * @cmd: action to perform
+ *
+ * This is called when the PCM is started, stopped or paused. The action will be
+ * specified in the second argument, SNDRV_PCM_TRIGGER_XXX
+ *
+ * Returns 0 on success or error code otherwise.
+ */
+static int pcm_trigger(struct snd_pcm_substream *substream, int cmd)
+{
+	struct channel *channel = substream->private_data;
+
+	switch (cmd) {
+	case SNDRV_PCM_TRIGGER_START:
+		channel->is_stream_running = true;
+		wake_up_interruptible(&channel->playback_waitq);
+		return 0;
+
+	case SNDRV_PCM_TRIGGER_STOP:
+		channel->is_stream_running = false;
+		return 0;
+
+	default:
+		pr_info("%s(), invalid\n", __func__);
+		return -EINVAL;
+	}
+	return 0;
+}
+
+/**
+ * pcm_pointer - implements pointer callback function for PCM middle layer
+ * @substream: substream pointer
+ *
+ * This callback is called when the PCM middle layer inquires the current
+ * hardware position on the buffer. The position must be returned in frames,
+ * ranging from 0 to buffer_size-1.
+ */
+static snd_pcm_uframes_t pcm_pointer(struct snd_pcm_substream *substream)
+{
+	struct channel *channel = substream->private_data;
+
+	return channel->buffer_pos;
+}
+
+/**
+ * Initialization of struct snd_pcm_ops
+ */
+static const struct snd_pcm_ops pcm_ops = {
+	.open       = pcm_open,
+	.close      = pcm_close,
+	.ioctl      = snd_pcm_lib_ioctl,
+	.hw_params  = pcm_hw_params,
+	.hw_free    = pcm_hw_free,
+	.prepare    = pcm_prepare,
+	.trigger    = pcm_trigger,
+	.pointer    = pcm_pointer,
+	.page       = snd_pcm_lib_get_vmalloc_page,
+	.mmap       = snd_pcm_lib_mmap_vmalloc,
+};
+
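+/* split a "<card_name>.<pcm_format>" argument into its two components */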
+static int split_arg_list(char *buf, char **card_name, char **pcm_format)
+{
+	*card_name = strsep(&buf, ".");
+	if (!*card_name)
+		return -EIO;
+	*pcm_format = strsep(&buf, ".\n");
+	if (!*pcm_format)
+		return -EIO;
+	return 0;
+}
+
+static int audio_set_hw_params(struct snd_pcm_hardware *pcm_hw,
+			       char *pcm_format,
+			       struct most_channel_config *cfg)
+{
+	pcm_hw->info = MOST_PCM_INFO;
+	pcm_hw->rates = SNDRV_PCM_RATE_48000;
+	pcm_hw->rate_min = 48000;
+	pcm_hw->rate_max = 48000;
+	pcm_hw->buffer_bytes_max = cfg->num_buffers * cfg->buffer_size;
+	pcm_hw->period_bytes_min = cfg->buffer_size;
+	pcm_hw->period_bytes_max = cfg->buffer_size;
+	pcm_hw->periods_min = 1;
+	pcm_hw->periods_max = cfg->num_buffers;
+
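+	/* pcm_format is "<channels>x<bits per sample>"; the configured
+	 * subbuffer size must hold exactly one audio frame
+	 */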
+	if (!strcmp(pcm_format, "1x8")) {
+		if (cfg->subbuffer_size != 1)
+			goto error;
+		pr_info("PCM format is 8-bit mono\n");
+		pcm_hw->channels_min = 1;
+		pcm_hw->channels_max = 1;
+		pcm_hw->formats = SNDRV_PCM_FMTBIT_S8;
+	} else if (!strcmp(pcm_format, "2x16")) {
+		if (cfg->subbuffer_size != 4)
+			goto error;
+		pr_info("PCM format is 16-bit stereo\n");
+		pcm_hw->channels_min = 2;
+		pcm_hw->channels_max = 2;
+		pcm_hw->formats = SNDRV_PCM_FMTBIT_S16_LE |
+				  SNDRV_PCM_FMTBIT_S16_BE;
+	} else if (!strcmp(pcm_format, "2x24")) {
+		if (cfg->subbuffer_size != 6)
+			goto error;
+		pr_info("PCM format is 24-bit stereo\n");
+		pcm_hw->channels_min = 2;
+		pcm_hw->channels_max = 2;
+		pcm_hw->formats = SNDRV_PCM_FMTBIT_S24_3LE |
+				  SNDRV_PCM_FMTBIT_S24_3BE;
+	} else if (!strcmp(pcm_format, "2x32")) {
+		if (cfg->subbuffer_size != 8)
+			goto error;
+		pr_info("PCM format is 32-bit stereo\n");
+		pcm_hw->channels_min = 2;
+		pcm_hw->channels_max = 2;
+		pcm_hw->formats = SNDRV_PCM_FMTBIT_S32_LE |
+				  SNDRV_PCM_FMTBIT_S32_BE;
+	} else if (!strcmp(pcm_format, "6x16")) {
+		if (cfg->subbuffer_size != 12)
+			goto error;
+		pr_info("PCM format is 16-bit 5.1 multi channel\n");
+		pcm_hw->channels_min = 6;
+		pcm_hw->channels_max = 6;
+		pcm_hw->formats = SNDRV_PCM_FMTBIT_S16_LE |
+				  SNDRV_PCM_FMTBIT_S16_BE;
+	} else {
+		pr_err("PCM format %s not supported\n", pcm_format);
+		return -EIO;
+	}
+	return 0;
+error:
+	pr_err("Audio resolution doesn't fit subbuffer size\n");
+	return -EINVAL;
+}
+
+/**
+ * audio_probe_channel - probe function of the driver module
+ * @iface: pointer to interface instance
+ * @channel_id: channel index/ID
+ * @cfg: pointer to actual channel configuration
+ * @parent: pointer to kobject (needed for sysfs hook-up)
+ * @arg_list: string that provides the name of the device to be created in /dev
+ *	      plus the desired audio resolution
+ *
+ * Creates sound card, pcm device, sets pcm ops and registers sound card.
+ *
+ * Returns 0 on success or error code otherwise.
+ */
+static int audio_probe_channel(struct most_interface *iface, int channel_id,
+			       struct most_channel_config *cfg,
+			       struct kobject *parent, char *arg_list)
+{
+	struct channel *channel;
+	struct snd_card *card;
+	struct snd_pcm *pcm;
+	int playback_count = 0;
+	int capture_count = 0;
+	int ret;
+	int direction;
+	char *card_name;
+	char *pcm_format;
+
+	if (!iface)
+		return -EINVAL;
+
+	if (cfg->data_type != MOST_CH_SYNC) {
+		pr_err("Incompatible channel type\n");
+		return -EINVAL;
+	}
+
+	if (get_channel(iface, channel_id)) {
+		pr_err("channel (%s:%d) is already linked\n",
+		       iface->description, channel_id);
+		return -EINVAL;
+	}
+
+	if (cfg->direction == MOST_CH_TX) {
+		playback_count = 1;
+		direction = SNDRV_PCM_STREAM_PLAYBACK;
+	} else {
+		capture_count = 1;
+		direction = SNDRV_PCM_STREAM_CAPTURE;
+	}
+
+	ret = split_arg_list(arg_list, &card_name, &pcm_format);
+	if (ret < 0) {
+		pr_info("PCM format missing\n");
+		return ret;
+	}
+
+	ret = snd_card_new(NULL, -1, card_name, THIS_MODULE,
+			   sizeof(*channel), &card);
+	if (ret < 0)
+		return ret;
+
+	channel = card->private_data;
+	channel->card = card;
+	channel->cfg = cfg;
+	channel->iface = iface;
+	channel->id = channel_id;
+	init_waitqueue_head(&channel->playback_waitq);
+
+	ret = audio_set_hw_params(&channel->pcm_hardware, pcm_format, cfg);
+	if (ret)
+		goto err_free_card;
+
+	snprintf(card->driver, sizeof(card->driver), "%s", DRIVER_NAME);
+	snprintf(card->shortname, sizeof(card->shortname), "Microchip MOST:%d",
+		 card->number);
+	snprintf(card->longname, sizeof(card->longname), "%s at %s, ch %d",
+		 card->shortname, iface->description, channel_id);
+
+	ret = snd_pcm_new(card, card_name, 0, playback_count,
+			  capture_count, &pcm);
+	if (ret < 0)
+		goto err_free_card;
+
+	pcm->private_data = channel;
+
+	snd_pcm_set_ops(pcm, direction, &pcm_ops);
+
+	ret = snd_card_register(card);
+	if (ret < 0)
+		goto err_free_card;
+
+	list_add_tail(&channel->list, &dev_list);
+
+	return 0;
+
+err_free_card:
+	snd_card_free(card);
+	return ret;
+}
+
+/**
+ * audio_disconnect_channel - function to disconnect a channel
+ * @iface: pointer to interface instance
+ * @channel_id: channel index
+ *
+ * This frees allocated memory and removes the sound card from ALSA
+ *
+ * Returns 0 on success or error code otherwise.
+ */
+static int audio_disconnect_channel(struct most_interface *iface,
+				    int channel_id)
+{
+	struct channel *channel;
+
+	channel = get_channel(iface, channel_id);
+	if (!channel) {
+		pr_err("sound_disconnect_channel(), invalid channel %d\n",
+		       channel_id);
+		return -EINVAL;
+	}
+
+	list_del(&channel->list);
+	snd_card_free(channel->card);
+
+	return 0;
+}
+
+/**
+ * audio_rx_completion - completion handler for rx channels
+ * @mbo: pointer to buffer object that has completed
+ *
+ * This searches for the channel this MBO belongs to and copies the data from
+ * the MBO to the ring buffer.
+ *
+ * Returns 0 on success or error code otherwise.
+ */
+static int audio_rx_completion(struct mbo *mbo)
+{
+	struct channel *channel = get_channel(mbo->ifp, mbo->hdm_channel_id);
+	bool period_elapsed = false;
+
+	if (!channel) {
+		pr_err("sound_rx_completion(), invalid channel %d\n",
+		       mbo->hdm_channel_id);
+		return -EINVAL;
+	}
+
+	if (channel->is_stream_running)
+		period_elapsed = copy_data(channel, mbo);
+
+	most_put_mbo(mbo);
+
+	if (period_elapsed)
+		snd_pcm_period_elapsed(channel->substream);
+
+	return 0;
+}
+
+/**
+ * audio_tx_completion - completion handler for tx channels
+ * @iface: pointer to interface instance
+ * @channel_id: channel index/ID
+ *
+ * This searches for the channel that belongs to this combination of interface
+ * pointer and channel ID and wakes a process sitting in the wait queue of
+ * this channel.
+ *
+ * Returns 0 on success or error code otherwise.
+ */
+static int audio_tx_completion(struct most_interface *iface, int channel_id)
+{
+	struct channel *channel = get_channel(iface, channel_id);
+
+	if (!channel) {
+		pr_err("sound_tx_completion(), invalid channel %d\n",
+		       channel_id);
+		return -EINVAL;
+	}
+
+	wake_up_interruptible(&channel->playback_waitq);
+
+	return 0;
+}
+
+/**
+ * Initialization of the struct most_aim
+ */
+static struct most_aim audio_aim = {
+	.name = DRIVER_NAME,
+	.probe_channel = audio_probe_channel,
+	.disconnect_channel = audio_disconnect_channel,
+	.rx_completion = audio_rx_completion,
+	.tx_completion = audio_tx_completion,
+};
+
+static int __init audio_init(void)
+{
+	pr_info("init()\n");
+
+	INIT_LIST_HEAD(&dev_list);
+
+	return most_register_aim(&audio_aim);
+}
+
+static void __exit audio_exit(void)
+{
+	struct channel *channel, *tmp;
+
+	pr_info("exit()\n");
+
+	list_for_each_entry_safe(channel, tmp, &dev_list, list) {
+		list_del(&channel->list);
+		snd_card_free(channel->card);
+	}
+
+	most_deregister_aim(&audio_aim);
+}
+
+module_init(audio_init);
+module_exit(audio_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Christian Gromm <christian.gromm@xxxxxxxxxxxxx>");
+MODULE_DESCRIPTION("Audio Application Interface Module for MostCore");
diff --git a/drivers/staging/most/usb/Kconfig b/drivers/staging/most/usb/Kconfig
new file mode 100644
index 0000000..ebbdb57
--- /dev/null
+++ b/drivers/staging/most/usb/Kconfig
@@ -0,0 +1,13 @@
+#
+# MOST USB configuration
+#
+
+config MOST_USB
+	tristate "USB"
+	depends on USB && NET
+	---help---
+	  Say Y here if you want to connect via USB to a network transceiver.
+	  This device driver depends on the networking AIM.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called most_usb.
diff --git a/drivers/staging/most/usb/Makefile b/drivers/staging/most/usb/Makefile
new file mode 100644
index 0000000..18d28cb
--- /dev/null
+++ b/drivers/staging/most/usb/Makefile
@@ -0,0 +1,4 @@
+obj-$(CONFIG_MOST_USB) += most_usb.o
+
+most_usb-objs := usb.o
+ccflags-y += -Idrivers/staging/
diff --git a/drivers/staging/most/usb/usb.c b/drivers/staging/most/usb/usb.c
new file mode 100644
index 0000000..2e4f237
--- /dev/null
+++ b/drivers/staging/most/usb/usb.c
@@ -0,0 +1,1307 @@
+/*
+ * usb.c - Hardware dependent module for USB
+ *
+ * Copyright (C) 2013-2015 Microchip Technology Germany II GmbH & Co. KG
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * This file is licensed under GPLv2.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/usb.h>
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/cdev.h>
+#include <linux/device.h>
+#include <linux/list.h>
+#include <linux/completion.h>
+#include <linux/mutex.h>
+#include <linux/spinlock.h>
+#include <linux/interrupt.h>
+#include <linux/workqueue.h>
+#include <linux/sysfs.h>
+#include <linux/dma-mapping.h>
+#include <linux/etherdevice.h>
+#include <linux/uaccess.h>
+#include <most/core.h>
+
+#define USB_MTU			512
+#define NO_ISOCHRONOUS_URB	0
+#define AV_PACKETS_PER_XACT	2
+#define BUF_CHAIN_SIZE		0xFFFF
+#define MAX_NUM_ENDPOINTS	30
+#define MAX_SUFFIX_LEN		10
+#define MAX_STRING_LEN		80
+#define MAX_BUF_SIZE		0xFFFF
+
+#define USB_VENDOR_ID_SMSC	0x0424  /* VID: SMSC */
+#define USB_DEV_ID_BRDG		0xC001  /* PID: USB Bridge */
+#define USB_DEV_ID_OS81118	0xCF18  /* PID: USB OS81118 */
+#define USB_DEV_ID_OS81119	0xCF19  /* PID: USB OS81119 */
+#define USB_DEV_ID_OS81210	0xCF30  /* PID: USB OS81210 */
+/* DRCI Addresses */
+#define DRCI_REG_NI_STATE	0x0100
+#define DRCI_REG_PACKET_BW	0x0101
+#define DRCI_REG_NODE_ADDR	0x0102
+#define DRCI_REG_NODE_POS	0x0103
+#define DRCI_REG_MEP_FILTER	0x0140
+#define DRCI_REG_HASH_TBL0	0x0141
+#define DRCI_REG_HASH_TBL1	0x0142
+#define DRCI_REG_HASH_TBL2	0x0143
+#define DRCI_REG_HASH_TBL3	0x0144
+#define DRCI_REG_HW_ADDR_HI	0x0145
+#define DRCI_REG_HW_ADDR_MI	0x0146
+#define DRCI_REG_HW_ADDR_LO	0x0147
+#define DRCI_REG_BASE		0x1100
+#define DRCI_COMMAND		0x02
+#define DRCI_READ_REQ		0xA0
+#define DRCI_WRITE_REQ		0xA1
+
+/**
+ * struct most_dci_obj - Direct Communication Interface
+ * @kobj: position in sysfs
+ * @usb_device: pointer to the usb device
+ * @reg_addr: register address for arbitrary DCI access
+ */
+struct most_dci_obj {
+	struct kobject kobj;
+	struct usb_device *usb_device;
+	u16 reg_addr;
+};
+
+#define to_dci_obj(p) container_of(p, struct most_dci_obj, kobj)
+
+struct most_dev;
+
+struct clear_hold_work {
+	struct work_struct ws;
+	struct most_dev *mdev;
+	unsigned int channel;
+	int pipe;
+};
+
+#define to_clear_hold_work(w) container_of(w, struct clear_hold_work, ws)
+
+/**
+ * struct most_dev - holds all usb interface specific stuff
+ * @parent: parent object in sysfs
+ * @usb_device: pointer to usb device
+ * @iface: hardware interface
+ * @cap: channel capabilities
+ * @conf: channel configuration
+ * @dci: direct communication interface of hardware
+ * @ep_address: endpoint address table
+ * @description: device description
+ * @suffix: suffix for channel name
+ * @channel_lock: synchronize channel access
+ * @padding_active: indicates channel uses padding
+ * @is_channel_healthy: health status table of each channel
+ * @busy_urbs: list of anchored items
+ * @io_mutex: synchronize I/O with disconnect
+ * @link_stat_timer: timer for link status reports
+ * @poll_work_obj: work for polling link status
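+ * @clear_work: per-channel work items used to clear a halted endpoint
+ * @on_netinfo: callback to deliver NI state and MAC address to the core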
+ */
+struct most_dev {
+	struct kobject *parent;
+	struct usb_device *usb_device;
+	struct most_interface iface;
+	struct most_channel_capability *cap;
+	struct most_channel_config *conf;
+	struct most_dci_obj *dci;
+	u8 *ep_address;
+	char description[MAX_STRING_LEN];
+	char suffix[MAX_NUM_ENDPOINTS][MAX_SUFFIX_LEN];
+	spinlock_t channel_lock[MAX_NUM_ENDPOINTS]; /* sync channel access */
+	bool padding_active[MAX_NUM_ENDPOINTS];
+	bool is_channel_healthy[MAX_NUM_ENDPOINTS];
+	struct clear_hold_work clear_work[MAX_NUM_ENDPOINTS];
+	struct usb_anchor *busy_urbs;
+	struct mutex io_mutex;
+	struct timer_list link_stat_timer;
+	struct work_struct poll_work_obj;
+	void (*on_netinfo)(struct most_interface *, unsigned char,
+			   unsigned char *);
+};
+
+#define to_mdev(d) container_of(d, struct most_dev, iface)
+#define to_mdev_from_work(w) container_of(w, struct most_dev, poll_work_obj)
+
+static void wq_clear_halt(struct work_struct *wq_obj);
+static void wq_netinfo(struct work_struct *wq_obj);
+
+/**
+ * drci_rd_reg - read a DCI register
+ * @dev: usb device
+ * @reg: register address
+ * @buf: buffer to store data
+ *
+ * This reads data from the INIC's direct register communication interface.
+ */
+static inline int drci_rd_reg(struct usb_device *dev, u16 reg, u16 *buf)
+{
+	int retval;
+	__le16 *dma_buf = kzalloc(sizeof(*dma_buf), GFP_KERNEL);
+	u8 req_type = USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE;
+
+	if (!dma_buf)
+		return -ENOMEM;
+
+	retval = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0),
+				 DRCI_READ_REQ, req_type,
+				 0x0000,
+				 reg, dma_buf, sizeof(*dma_buf), 5 * HZ);
+	*buf = le16_to_cpu(*dma_buf);
+	kfree(dma_buf);
+
+	return retval;
+}
+
+/**
+ * drci_wr_reg - write a DCI register
+ * @dev: usb device
+ * @reg: register address
+ * @data: data to write
+ *
+ * This writes data to the INIC's direct register communication interface.
+ */
+static inline int drci_wr_reg(struct usb_device *dev, u16 reg, u16 data)
+{
+	return usb_control_msg(dev,
+			       usb_sndctrlpipe(dev, 0),
+			       DRCI_WRITE_REQ,
+			       USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
+			       data,
+			       reg,
+			       NULL,
+			       0,
+			       5 * HZ);
+}
+
+static inline int start_sync_ep(struct usb_device *usb_dev, u16 ep)
+{
+	return drci_wr_reg(usb_dev, DRCI_REG_BASE + DRCI_COMMAND + ep * 16, 1);
+}
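These two accessors, together with start_sync_ep() above, are the module's only path into the INIC's register file. A minimal sketch of how they combine follows; it is illustrative only and not part of this patch (the helper name is made up, and it relies on the declarations earlier in this file):

	static int drci_read_node_addr(struct usb_device *dev)
	{
		u16 node_addr;
		int ret = drci_rd_reg(dev, DRCI_REG_NODE_ADDR, &node_addr);

		if (ret < 0)
			return ret;
		/* usb_control_msg() returns the transferred byte count on success */
		dev_info(&dev->dev, "node address: 0x%04x\n", node_addr);
		return 0;
	}

The same pattern with drci_wr_reg() backs the sysfs store paths further down in this file.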
+
+/**
+ * get_stream_frame_size - calculate frame size of current configuration
+ * @cfg: channel configuration
+ */
+static unsigned int get_stream_frame_size(struct most_channel_config *cfg)
+{
+	unsigned int frame_size = 0;
+	unsigned int sub_size = cfg->subbuffer_size;
+
+	if (!sub_size) {
+		pr_warn("Misconfig: Subbuffer size zero.\n");
+		return frame_size;
+	}
+	switch (cfg->data_type) {
+	case MOST_CH_ISOC:
+		frame_size = AV_PACKETS_PER_XACT * sub_size;
+		break;
+	case MOST_CH_SYNC:
+		if (cfg->packets_per_xact == 0) {
+			pr_warn("Misconfig: Packets per XACT zero\n");
+			frame_size = 0;
+		} else if (cfg->packets_per_xact == 0xFF) {
+			frame_size = (USB_MTU / sub_size) * sub_size;
+		} else {
+			frame_size = cfg->packets_per_xact * sub_size;
+		}
+		break;
+	default:
+		pr_warn("Query frame size of non-streaming channel\n");
+		break;
+	}
+	return frame_size;
+}
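(A quick worked example of the arithmetic above: a synchronous channel with subbuffer_size 12 and packets_per_xact set to 0xFF yields (512 / 12) * 12 = 504 bytes per frame, the largest multiple of the sub-buffer size that fits into the 512-byte USB_MTU, while packets_per_xact = 4 would simply give 4 * 12 = 48 bytes.)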
+
+/**
+ * hdm_poison_channel - mark buffers of this channel as invalid
+ * @iface: pointer to the interface
+ * @channel: channel ID
+ *
+ * This unlinks all URBs submitted to the HCD,
+ * calls the associated completion function of the core and removes
+ * them from the list.
+ *
+ * Returns 0 on success or error code otherwise.
+ */
+static int hdm_poison_channel(struct most_interface *iface, int channel)
+{
+	struct most_dev *mdev = to_mdev(iface);
+	unsigned long flags;
+	spinlock_t *lock; /* temp. lock */
+
+	if (unlikely(!iface)) {
+		dev_warn(&mdev->usb_device->dev, "Poison: Bad interface.\n");
+		return -EIO;
+	}
+	if (unlikely(channel < 0 || channel >= iface->num_channels)) {
+		dev_warn(&mdev->usb_device->dev, "Channel ID out of range.\n");
+		return -ECHRNG;
+	}
+
+	lock = mdev->channel_lock + channel;
+	spin_lock_irqsave(lock, flags);
+	mdev->is_channel_healthy[channel] = false;
+	spin_unlock_irqrestore(lock, flags);
+
+	cancel_work_sync(&mdev->clear_work[channel].ws);
+
+	mutex_lock(&mdev->io_mutex);
+	usb_kill_anchored_urbs(&mdev->busy_urbs[channel]);
+	if (mdev->padding_active[channel])
+		mdev->padding_active[channel] = false;
+
+	if (mdev->conf[channel].data_type == MOST_CH_ASYNC) {
+		del_timer_sync(&mdev->link_stat_timer);
+		cancel_work_sync(&mdev->poll_work_obj);
+	}
+	mutex_unlock(&mdev->io_mutex);
+	return 0;
+}
+
+/**
+ * hdm_add_padding - add padding bytes
+ * @mdev: most device
+ * @channel: channel ID
+ * @mbo: buffer object
+ *
+ * This inserts the INIC hardware specific padding bytes into a streaming
+ * channel's buffer
+ */
+static int hdm_add_padding(struct most_dev *mdev, int channel, struct mbo *mbo)
+{
+	struct most_channel_config *conf = &mdev->conf[channel];
+	unsigned int frame_size = get_stream_frame_size(conf);
+	unsigned int j, num_frames;
+
+	if (!frame_size)
+		return -EIO;
+	num_frames = mbo->buffer_length / frame_size;
+
+	if (num_frames < 1) {
+		dev_err(&mdev->usb_device->dev,
+			"Missed minimal transfer unit.\n");
+		return -EIO;
+	}
+
+	for (j = num_frames - 1; j > 0; j--)
+		memmove(mbo->virt_address + j * USB_MTU,
+			mbo->virt_address + j * frame_size,
+			frame_size);
+	mbo->buffer_length = num_frames * USB_MTU;
+	return 0;
+}
+
+/**
+ * hdm_remove_padding - remove padding bytes
+ * @mdev: most device
+ * @channel: channel ID
+ * @mbo: buffer object
+ *
+ * This takes the INIC hardware specific padding bytes off a streaming
+ * channel's buffer.
+ */
+static int hdm_remove_padding(struct most_dev *mdev, int channel,
+			      struct mbo *mbo)
+{
+	struct most_channel_config *const conf = &mdev->conf[channel];
+	unsigned int frame_size = get_stream_frame_size(conf);
+	unsigned int j, num_frames;
+
+	if (!frame_size)
+		return -EIO;
+	num_frames = mbo->processed_length / USB_MTU;
+
+	for (j = 1; j < num_frames; j++)
+		memmove(mbo->virt_address + frame_size * j,
+			mbo->virt_address + USB_MTU * j,
+			frame_size);
+
+	mbo->processed_length = frame_size * num_frames;
+	return 0;
+}
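(Continuing the 504-byte example: hdm_add_padding() spreads the frames out so that each one starts on a 512-byte (USB_MTU) boundary, leaving 8 padding bytes at the end of every transaction, and hdm_remove_padding() reverses this by compacting the received 512-byte slots back into contiguous 504-byte frames and shrinking processed_length accordingly.)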
+
+/**
+ * hdm_write_completion - completion function for submitted Tx URBs
+ * @urb: the URB that has been completed
+ *
+ * This checks the status of the completed URB. In case the URB has been
+ * unlinked before, it is immediately freed. On any other error the MBO
+ * transfer flag is set. On success it frees allocated resources and calls
+ * the completion function.
+ *
+ * Context: interrupt!
+ */
+static void hdm_write_completion(struct urb *urb)
+{
+	struct mbo *mbo = urb->context;
+	struct most_dev *mdev = to_mdev(mbo->ifp);
+	unsigned int channel = mbo->hdm_channel_id;
+	struct device *dev = &mdev->usb_device->dev;
+	spinlock_t *lock = mdev->channel_lock + channel;
+	unsigned long flags;
+
+	spin_lock_irqsave(lock, flags);
+
+	mbo->processed_length = 0;
+	mbo->status = MBO_E_INVAL;
+	if (likely(mdev->is_channel_healthy[channel])) {
+		switch (urb->status) {
+		case 0:
+		case -ESHUTDOWN:
+			mbo->processed_length = urb->actual_length;
+			mbo->status = MBO_SUCCESS;
+			break;
+		case -EPIPE:
+			dev_warn(dev, "Broken OUT pipe detected\n");
+			mdev->is_channel_healthy[channel] = false;
+			mdev->clear_work[channel].pipe = urb->pipe;
+			schedule_work(&mdev->clear_work[channel].ws);
+			break;
+		case -ENODEV:
+		case -EPROTO:
+			mbo->status = MBO_E_CLOSE;
+			break;
+		}
+	}
+
+	spin_unlock_irqrestore(lock, flags);
+
+	if (likely(mbo->complete))
+		mbo->complete(mbo);
+	usb_free_urb(urb);
+}
+
+/**
+ * hdm_read_completion - completion function for submitted Rx URBs
+ * @urb: the URB that has been completed
+ *
+ * This checks the status of the completed URB. In case the URB has been
+ * unlinked before, it is immediately freed. On any other error the MBO transfer
+ * flag is set. On success it frees allocated resources, removes
+ * padding bytes, if necessary, and calls the completion function.
+ *
+ * Context: interrupt!
+ *
+ * **************************************************************************
+ *                   Error codes returned by in urb->status
+ *                   or in iso_frame_desc[n].status (for ISO)
+ * *************************************************************************
+ *
+ * USB device drivers may only test urb status values in completion handlers.
+ * This is because otherwise there would be a race between HCDs updating
+ * these values on one CPU, and device drivers testing them on another CPU.
+ *
+ * A transfer's actual_length may be positive even when an error has been
+ * reported.  That's because transfers often involve several packets, so that
+ * one or more packets could finish before an error stops further endpoint I/O.
+ *
+ * For isochronous URBs, the urb status value is non-zero only if the URB is
+ * unlinked, the device is removed, the host controller is disabled or the total
+ * transferred length is less than the requested length and the URB_SHORT_NOT_OK
+ * flag is set.  Completion handlers for isochronous URBs should only see
+ * urb->status set to zero, -ENOENT, -ECONNRESET, -ESHUTDOWN, or -EREMOTEIO.
+ * Individual frame descriptor status fields may report more status codes.
+ *
+ *
+ * 0			Transfer completed successfully
+ *
+ * -ENOENT		URB was synchronously unlinked by usb_unlink_urb
+ *
+ * -EINPROGRESS		URB still pending, no results yet
+ *			(That is, if drivers see this it's a bug.)
+ *
+ * -EPROTO (*, **)	a) bitstuff error
+ *			b) no response packet received within the
+ *			   prescribed bus turn-around time
+ *			c) unknown USB error
+ *
+ * -EILSEQ (*, **)	a) CRC mismatch
+ *			b) no response packet received within the
+ *			   prescribed bus turn-around time
+ *			c) unknown USB error
+ *
+ *			Note that often the controller hardware does not
+ *			distinguish among cases a), b), and c), so a
+ *			driver cannot tell whether there was a protocol
+ *			error, a failure to respond (often caused by
+ *			device disconnect), or some other fault.
+ *
+ * -ETIME (**)		No response packet received within the prescribed
+ *			bus turn-around time.  This error may instead be
+ *			reported as -EPROTO or -EILSEQ.
+ *
+ * -ETIMEDOUT		Synchronous USB message functions use this code
+ *			to indicate timeout expired before the transfer
+ *			completed, and no other error was reported by HC.
+ *
+ * -EPIPE (**)		Endpoint stalled.  For non-control endpoints,
+ *			reset this status with usb_clear_halt().
+ *
+ * -ECOMM		During an IN transfer, the host controller
+ *			received data from an endpoint faster than it
+ *			could be written to system memory
+ *
+ * -ENOSR		During an OUT transfer, the host controller
+ *			could not retrieve data from system memory fast
+ *			enough to keep up with the USB data rate
+ *
+ * -EOVERFLOW (*)	The amount of data returned by the endpoint was
+ *			greater than either the max packet size of the
+ *			endpoint or the remaining buffer size.  "Babble".
+ *
+ * -EREMOTEIO		The data read from the endpoint did not fill the
+ *			specified buffer, and URB_SHORT_NOT_OK was set in
+ *			urb->transfer_flags.
+ *
+ * -ENODEV		Device was removed.  Often preceded by a burst of
+ *			other errors, since the hub driver doesn't detect
+ *			device removal events immediately.
+ *
+ * -EXDEV		ISO transfer only partially completed
+ *			(only set in iso_frame_desc[n].status, not urb->status)
+ *
+ * -EINVAL		ISO madness, if this happens: Log off and go home
+ *
+ * -ECONNRESET		URB was asynchronously unlinked by usb_unlink_urb
+ *
+ * -ESHUTDOWN		The device or host controller has been disabled due
+ *			to some problem that could not be worked around,
+ *			such as a physical disconnect.
+ *
+ *
+ * (*) Error codes like -EPROTO, -EILSEQ and -EOVERFLOW normally indicate
+ * hardware problems such as bad devices (including firmware) or cables.
+ *
+ * (**) This is also one of several codes that different kinds of host
+ * controller use to indicate a transfer has failed because of device
+ * disconnect.  In the interval before the hub driver starts disconnect
+ * processing, devices may receive such fault reports for every request.
+ *
+ * See <https://www.kernel.org/doc/Documentation/driver-api/usb/error-codes.rst>
+ */
+static void hdm_read_completion(struct urb *urb)
+{
+	struct mbo *mbo = urb->context;
+	struct most_dev *mdev = to_mdev(mbo->ifp);
+	unsigned int channel = mbo->hdm_channel_id;
+	struct device *dev = &mdev->usb_device->dev;
+	spinlock_t *lock = mdev->channel_lock + channel;
+	unsigned long flags;
+
+	spin_lock_irqsave(lock, flags);
+
+	mbo->processed_length = 0;
+	mbo->status = MBO_E_INVAL;
+	if (likely(mdev->is_channel_healthy[channel])) {
+		switch (urb->status) {
+		case 0:
+		case -ESHUTDOWN:
+			mbo->processed_length = urb->actual_length;
+			mbo->status = MBO_SUCCESS;
+			if (mdev->padding_active[channel] &&
+			    hdm_remove_padding(mdev, channel, mbo)) {
+				mbo->processed_length = 0;
+				mbo->status = MBO_E_INVAL;
+			}
+			break;
+		case -EPIPE:
+			dev_warn(dev, "Broken IN pipe detected\n");
+			mdev->is_channel_healthy[channel] = false;
+			mdev->clear_work[channel].pipe = urb->pipe;
+			schedule_work(&mdev->clear_work[channel].ws);
+			break;
+		case -ENODEV:
+		case -EPROTO:
+			mbo->status = MBO_E_CLOSE;
+			break;
+		case -EOVERFLOW:
+			dev_warn(dev, "Babble on IN pipe detected\n");
+			break;
+		}
+	}
+
+	spin_unlock_irqrestore(lock, flags);
+
+	if (likely(mbo->complete))
+		mbo->complete(mbo);
+	usb_free_urb(urb);
+}
+
+/**
+ * hdm_enqueue - receive a buffer to be used for data transfer
+ * @iface: interface to enqueue to
+ * @channel: ID of the channel
+ * @mbo: pointer to the buffer object
+ *
+ * This allocates a new URB and fills it according to the channel
+ * that is being used for transmission of data. Before the URB is
+ * submitted it is stored in the private anchor list.
+ *
+ * Returns 0 on success. On any error the URB is freed and an error code
+ * is returned.
+ *
+ * Context: Could in _some_ cases be interrupt!
+ */
+static int hdm_enqueue(struct most_interface *iface, int channel,
+		       struct mbo *mbo)
+{
+	struct most_dev *mdev;
+	struct most_channel_config *conf;
+	struct device *dev;
+	int retval = 0;
+	struct urb *urb;
+	unsigned long length;
+	void *virt_address;
+
+	if (unlikely(!iface || !mbo))
+		return -EIO;
+	if (unlikely(iface->num_channels <= channel || channel < 0))
+		return -ECHRNG;
+
+	mdev = to_mdev(iface);
+	conf = &mdev->conf[channel];
+	dev = &mdev->usb_device->dev;
+
+	if (!mdev->usb_device)
+		return -ENODEV;
+
+	urb = usb_alloc_urb(NO_ISOCHRONOUS_URB, GFP_ATOMIC);
+	if (!urb)
+		return -ENOMEM;
+
+	if ((conf->direction & MOST_CH_TX) && mdev->padding_active[channel] &&
+	    hdm_add_padding(mdev, channel, mbo)) {
+		retval = -EIO;
+		goto _error;
+	}
+
+	urb->transfer_dma = mbo->bus_address;
+	virt_address = mbo->virt_address;
+	length = mbo->buffer_length;
+
+	if (conf->direction & MOST_CH_TX) {
+		usb_fill_bulk_urb(urb, mdev->usb_device,
+				  usb_sndbulkpipe(mdev->usb_device,
+						  mdev->ep_address[channel]),
+				  virt_address,
+				  length,
+				  hdm_write_completion,
+				  mbo);
+		if (conf->data_type != MOST_CH_ISOC)
+			urb->transfer_flags |= URB_ZERO_PACKET;
+	} else {
+		usb_fill_bulk_urb(urb, mdev->usb_device,
+				  usb_rcvbulkpipe(mdev->usb_device,
+						  mdev->ep_address[channel]),
+				  virt_address,
+				  length + conf->extra_len,
+				  hdm_read_completion,
+				  mbo);
+	}
+	urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
+
+	usb_anchor_urb(urb, &mdev->busy_urbs[channel]);
+
+	retval = usb_submit_urb(urb, GFP_KERNEL);
+	if (retval) {
+		dev_err(dev, "URB submit failed with error %d.\n", retval);
+		goto _error_1;
+	}
+	return 0;
+
+_error_1:
+	usb_unanchor_urb(urb);
+_error:
+	usb_free_urb(urb);
+	return retval;
+}
+
+/**
+ * hdm_configure_channel - receive channel configuration from core
+ * @iface: interface
+ * @channel: channel ID
+ * @conf: structure that holds the configuration information
+ *
+ * The attached network interface controller (NIC) supports a padding mode
+ * to avoid short packets on USB, hence increasing the performance due to a
+ * lower interrupt load. This mode is the default for synchronous data and can
+ * be switched on for isochronous data. In case padding is active the
+ * driver needs to know the frame size of the payload in order to calculate
+ * the number of bytes it needs to pad when transmitting or to cut off when
+ * receiving data.
+ *
+ */
+static int hdm_configure_channel(struct most_interface *iface, int channel,
+				 struct most_channel_config *conf)
+{
+	unsigned int num_frames;
+	unsigned int frame_size;
+	struct most_dev *mdev = to_mdev(iface);
+	struct device *dev = &mdev->usb_device->dev;
+
+	mdev->is_channel_healthy[channel] = true;
+	mdev->clear_work[channel].channel = channel;
+	mdev->clear_work[channel].mdev = mdev;
+	INIT_WORK(&mdev->clear_work[channel].ws, wq_clear_halt);
+
+	if (unlikely(!iface || !conf)) {
+		dev_err(dev, "Bad interface or config pointer.\n");
+		return -EINVAL;
+	}
+	if (unlikely(channel < 0 || channel >= iface->num_channels)) {
+		dev_err(dev, "Channel ID out of range.\n");
+		return -EINVAL;
+	}
+	if (!conf->num_buffers || !conf->buffer_size) {
+		dev_err(dev, "Misconfig: buffer size or #buffers zero.\n");
+		return -EINVAL;
+	}
+
+	if (conf->data_type != MOST_CH_SYNC &&
+	    !(conf->data_type == MOST_CH_ISOC &&
+	      conf->packets_per_xact != 0xFF)) {
+		mdev->padding_active[channel] = false;
+		/*
+		 * Since the NIC's padding mode is not going to be
+		 * used, we can skip the frame size calculations and
+		 * move directly on to exit.
+		 */
+		goto exit;
+	}
+
+	mdev->padding_active[channel] = true;
+
+	frame_size = get_stream_frame_size(conf);
+	if (frame_size == 0 || frame_size > USB_MTU) {
+		dev_warn(dev, "Misconfig: frame size wrong\n");
+		return -EINVAL;
+	}
+
+	num_frames = conf->buffer_size / frame_size;
+
+	if (conf->buffer_size % frame_size) {
+		u16 old_size = conf->buffer_size;
+
+		conf->buffer_size = num_frames * frame_size;
+		dev_warn(dev, "%s: fixed buffer size (%d -> %d)\n",
+			 mdev->suffix[channel], old_size, conf->buffer_size);
+	}
+
+	/* calculate extra length to comply w/ HW padding */
+	conf->extra_len = num_frames * (USB_MTU - frame_size);
+
+exit:
+	mdev->conf[channel] = *conf;
+	if (conf->data_type == MOST_CH_ASYNC) {
+		u16 ep = mdev->ep_address[channel];
+
+		if (start_sync_ep(mdev->usb_device, ep) < 0)
+			dev_warn(dev, "sync for ep%02x failed", ep);
+	}
+	return 0;
+}
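(Continuing the running example: with frame_size = 504, a requested buffer_size of 2048 holds num_frames = 4 complete frames, so the code above rounds buffer_size down to 2016 and sets extra_len to 4 * (512 - 504) = 32 bytes of extra room for the hardware padding.)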
+
+/**
+ * hdm_request_netinfo - request network information
+ * @iface: pointer to interface
+ * @channel: channel ID
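+ * @on_netinfo: callback used to deliver the retrieved information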
+ *
+ * This is used as a trigger to set up the link status timer that
+ * polls for the NI state of the INIC every 2 seconds.
+ *
+ */
+static void hdm_request_netinfo(struct most_interface *iface, int channel,
+				void (*on_netinfo)(struct most_interface *,
+						   unsigned char,
+						   unsigned char *))
+{
+	struct most_dev *mdev;
+
+	BUG_ON(!iface);
+	mdev = to_mdev(iface);
+	mdev->on_netinfo = on_netinfo;
+	if (!on_netinfo)
+		return;
+
+	mdev->link_stat_timer.expires = jiffies + HZ;
+	mod_timer(&mdev->link_stat_timer, mdev->link_stat_timer.expires);
+}
+
+/**
+ * link_stat_timer_handler - schedule work to obtain MAC address and link status
+ * @data: pointer to USB device instance
+ *
+ * The handler runs in interrupt context. That's why we need to defer the
+ * tasks to a work queue.
+ */
+static void link_stat_timer_handler(unsigned long data)
+{
+	struct most_dev *mdev = (struct most_dev *)data;
+
+	schedule_work(&mdev->poll_work_obj);
+	mdev->link_stat_timer.expires = jiffies + (2 * HZ);
+	add_timer(&mdev->link_stat_timer);
+}
+
+/**
+ * wq_netinfo - work queue function to deliver latest networking information
+ * @wq_obj: object that holds data for our deferred work to do
+ *
+ * This retrieves the network interface status of the USB INIC
+ */
+static void wq_netinfo(struct work_struct *wq_obj)
+{
+	struct most_dev *mdev = to_mdev_from_work(wq_obj);
+	struct usb_device *usb_device = mdev->usb_device;
+	struct device *dev = &usb_device->dev;
+	u16 hi, mi, lo, link;
+	u8 hw_addr[6];
+
+	if (drci_rd_reg(usb_device, DRCI_REG_HW_ADDR_HI, &hi) < 0) {
+		dev_err(dev, "Vendor request 'hw_addr_hi' failed\n");
+		return;
+	}
+
+	if (drci_rd_reg(usb_device, DRCI_REG_HW_ADDR_MI, &mi) < 0) {
+		dev_err(dev, "Vendor request 'hw_addr_mid' failed\n");
+		return;
+	}
+
+	if (drci_rd_reg(usb_device, DRCI_REG_HW_ADDR_LO, &lo) < 0) {
+		dev_err(dev, "Vendor request 'hw_addr_low' failed\n");
+		return;
+	}
+
+	if (drci_rd_reg(usb_device, DRCI_REG_NI_STATE, &link) < 0) {
+		dev_err(dev, "Vendor request 'link status' failed\n");
+		return;
+	}
+
+	hw_addr[0] = hi >> 8;
+	hw_addr[1] = hi;
+	hw_addr[2] = mi >> 8;
+	hw_addr[3] = mi;
+	hw_addr[4] = lo >> 8;
+	hw_addr[5] = lo;
+
+	if (mdev->on_netinfo)
+		mdev->on_netinfo(&mdev->iface, link, hw_addr);
+}
+
+/**
+ * wq_clear_halt - work queue function
+ * @wq_obj: work_struct object to execute
+ *
+ * This sends a clear_halt to the given USB pipe.
+ */
+static void wq_clear_halt(struct work_struct *wq_obj)
+{
+	struct clear_hold_work *clear_work = to_clear_hold_work(wq_obj);
+	struct most_dev *mdev = clear_work->mdev;
+	unsigned int channel = clear_work->channel;
+	int pipe = clear_work->pipe;
+
+	mutex_lock(&mdev->io_mutex);
+	most_stop_enqueue(&mdev->iface, channel);
+	usb_kill_anchored_urbs(&mdev->busy_urbs[channel]);
+	if (usb_clear_halt(mdev->usb_device, pipe))
+		dev_warn(&mdev->usb_device->dev, "Failed to reset endpoint.\n");
+
+	mdev->is_channel_healthy[channel] = true;
+	most_resume_enqueue(&mdev->iface, channel);
+	mutex_unlock(&mdev->io_mutex);
+}
+
+/**
+ * hdm_usb_fops - file operation table for USB driver
+ */
+static const struct file_operations hdm_usb_fops = {
+	.owner = THIS_MODULE,
+};
+
+/**
+ * usb_device_id - ID table for HCD device probing
+ */
+static const struct usb_device_id usbid[] = {
+	{ USB_DEVICE(USB_VENDOR_ID_SMSC, USB_DEV_ID_BRDG), },
+	{ USB_DEVICE(USB_VENDOR_ID_SMSC, USB_DEV_ID_OS81118), },
+	{ USB_DEVICE(USB_VENDOR_ID_SMSC, USB_DEV_ID_OS81119), },
+	{ USB_DEVICE(USB_VENDOR_ID_SMSC, USB_DEV_ID_OS81210), },
+	{ } /* Terminating entry */
+};
+
+#define MOST_DCI_RO_ATTR(_name) \
+	struct most_dci_attribute most_dci_attr_##_name = \
+		__ATTR(_name, 0444, show_value, NULL)
+
+#define MOST_DCI_ATTR(_name) \
+	struct most_dci_attribute most_dci_attr_##_name = \
+		__ATTR(_name, 0644, show_value, store_value)
+
+#define MOST_DCI_WO_ATTR(_name) \
+	struct most_dci_attribute most_dci_attr_##_name = \
+		__ATTR(_name, 0200, NULL, store_value)
+
+/**
+ * struct most_dci_attribute - to access the attributes of a dci object
+ * @attr: attributes of a dci object
+ * @show: pointer to the show function
+ * @store: pointer to the store function
+ */
+struct most_dci_attribute {
+	struct attribute attr;
+	ssize_t (*show)(struct most_dci_obj *d,
+			struct most_dci_attribute *attr,
+			char *buf);
+	ssize_t (*store)(struct most_dci_obj *d,
+			 struct most_dci_attribute *attr,
+			 const char *buf,
+			 size_t count);
+};
+
+#define to_dci_attr(a) container_of(a, struct most_dci_attribute, attr)
+
+/**
+ * dci_attr_show - show function for dci object
+ * @kobj: pointer to kobject
+ * @attr: pointer to attribute struct
+ * @buf: buffer
+ */
+static ssize_t dci_attr_show(struct kobject *kobj, struct attribute *attr,
+			     char *buf)
+{
+	struct most_dci_attribute *dci_attr = to_dci_attr(attr);
+	struct most_dci_obj *dci_obj = to_dci_obj(kobj);
+
+	if (!dci_attr->show)
+		return -EIO;
+
+	return dci_attr->show(dci_obj, dci_attr, buf);
+}
+
+/**
+ * dci_attr_store - store function for dci object
+ * @kobj: pointer to kobject
+ * @attr: pointer to attribute struct
+ * @buf: buffer
+ * @len: length of buffer
+ */
+static ssize_t dci_attr_store(struct kobject *kobj,
+			      struct attribute *attr,
+			      const char *buf,
+			      size_t len)
+{
+	struct most_dci_attribute *dci_attr = to_dci_attr(attr);
+	struct most_dci_obj *dci_obj = to_dci_obj(kobj);
+
+	if (!dci_attr->store)
+		return -EIO;
+
+	return dci_attr->store(dci_obj, dci_attr, buf, len);
+}
+
+static const struct sysfs_ops most_dci_sysfs_ops = {
+	.show = dci_attr_show,
+	.store = dci_attr_store,
+};
+
+/**
+ * most_dci_release - release function for dci object
+ * @kobj: pointer to kobject
+ *
+ * This frees the memory allocated for the dci object
+ */
+static void most_dci_release(struct kobject *kobj)
+{
+	struct most_dci_obj *dci_obj = to_dci_obj(kobj);
+
+	kfree(dci_obj);
+}
+
+struct regs {
+	const char *name;
+	u16 reg;
+};
+
+static const struct regs ro_regs[] = {
+	{ "ni_state", DRCI_REG_NI_STATE },
+	{ "packet_bandwidth", DRCI_REG_PACKET_BW },
+	{ "node_address", DRCI_REG_NODE_ADDR },
+	{ "node_position", DRCI_REG_NODE_POS },
+};
+
+static const struct regs rw_regs[] = {
+	{ "mep_filter", DRCI_REG_MEP_FILTER },
+	{ "mep_hash0", DRCI_REG_HASH_TBL0 },
+	{ "mep_hash1", DRCI_REG_HASH_TBL1 },
+	{ "mep_hash2", DRCI_REG_HASH_TBL2 },
+	{ "mep_hash3", DRCI_REG_HASH_TBL3 },
+	{ "mep_eui48_hi", DRCI_REG_HW_ADDR_HI },
+	{ "mep_eui48_mi", DRCI_REG_HW_ADDR_MI },
+	{ "mep_eui48_lo", DRCI_REG_HW_ADDR_LO },
+};
+
+static int get_stat_reg_addr(const struct regs *regs, int size,
+			     const char *name, u16 *reg_addr)
+{
+	int i;
+
+	for (i = 0; i < size; i++) {
+		if (!strcmp(name, regs[i].name)) {
+			*reg_addr = regs[i].reg;
+			return 0;
+		}
+	}
+	return -EFAULT;
+}
+
+#define get_static_reg_addr(regs, name, reg_addr) \
+	get_stat_reg_addr(regs, ARRAY_SIZE(regs), name, reg_addr)
+
+static ssize_t show_value(struct most_dci_obj *dci_obj,
+			  struct most_dci_attribute *attr, char *buf)
+{
+	const char *name = attr->attr.name;
+	u16 val;
+	u16 reg_addr;
+	int err;
+
+	if (!strcmp(name, "arb_address"))
+		return snprintf(buf, PAGE_SIZE, "%04x\n", dci_obj->reg_addr);
+
+	if (!strcmp(name, "arb_value"))
+		reg_addr = dci_obj->reg_addr;
+	else if (get_static_reg_addr(ro_regs, name, &reg_addr) &&
+		 get_static_reg_addr(rw_regs, name, &reg_addr))
+		return -EFAULT;
+
+	err = drci_rd_reg(dci_obj->usb_device, reg_addr, &val);
+	if (err < 0)
+		return err;
+
+	return snprintf(buf, PAGE_SIZE, "%04x\n", val);
+}
+
+static ssize_t store_value(struct most_dci_obj *dci_obj,
+			   struct most_dci_attribute *attr,
+			   const char *buf, size_t count)
+{
+	u16 val;
+	u16 reg_addr;
+	const char *name = attr->attr.name;
+	struct usb_device *usb_dev = dci_obj->usb_device;
+	int err = kstrtou16(buf, 16, &val);
+
+	if (err)
+		return err;
+
+	if (!strcmp(name, "arb_address")) {
+		dci_obj->reg_addr = val;
+		return count;
+	}
+
+	if (!strcmp(name, "arb_value"))
+		err = drci_wr_reg(usb_dev, dci_obj->reg_addr, val);
+	else if (!strcmp(name, "sync_ep"))
+		err = start_sync_ep(usb_dev, val);
+	else if (!get_static_reg_addr(rw_regs, name, &reg_addr))
+		err = drci_wr_reg(usb_dev, reg_addr, val);
+	else
+		return -EFAULT;
+
+	if (err < 0)
+		return err;
+
+	return count;
+}
+
+static MOST_DCI_RO_ATTR(ni_state);
+static MOST_DCI_RO_ATTR(packet_bandwidth);
+static MOST_DCI_RO_ATTR(node_address);
+static MOST_DCI_RO_ATTR(node_position);
+static MOST_DCI_WO_ATTR(sync_ep);
+static MOST_DCI_ATTR(mep_filter);
+static MOST_DCI_ATTR(mep_hash0);
+static MOST_DCI_ATTR(mep_hash1);
+static MOST_DCI_ATTR(mep_hash2);
+static MOST_DCI_ATTR(mep_hash3);
+static MOST_DCI_ATTR(mep_eui48_hi);
+static MOST_DCI_ATTR(mep_eui48_mi);
+static MOST_DCI_ATTR(mep_eui48_lo);
+static MOST_DCI_ATTR(arb_address);
+static MOST_DCI_ATTR(arb_value);
+
+/**
+ * most_dci_def_attrs - array of default attribute files of the dci object
+ */
+static struct attribute *most_dci_def_attrs[] = {
+	&most_dci_attr_ni_state.attr,
+	&most_dci_attr_packet_bandwidth.attr,
+	&most_dci_attr_node_address.attr,
+	&most_dci_attr_node_position.attr,
+	&most_dci_attr_sync_ep.attr,
+	&most_dci_attr_mep_filter.attr,
+	&most_dci_attr_mep_hash0.attr,
+	&most_dci_attr_mep_hash1.attr,
+	&most_dci_attr_mep_hash2.attr,
+	&most_dci_attr_mep_hash3.attr,
+	&most_dci_attr_mep_eui48_hi.attr,
+	&most_dci_attr_mep_eui48_mi.attr,
+	&most_dci_attr_mep_eui48_lo.attr,
+	&most_dci_attr_arb_address.attr,
+	&most_dci_attr_arb_value.attr,
+	NULL,
+};
+
+/**
+ * DCI ktype
+ */
+static struct kobj_type most_dci_ktype = {
+	.sysfs_ops = &most_dci_sysfs_ops,
+	.release = most_dci_release,
+	.default_attrs = most_dci_def_attrs,
+};
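For completeness, a hypothetical userspace sketch of reading one of the DCI attributes registered above. The sysfs path is illustrative only, since the actual location depends on where the core places the interface kobject that serves as parent of the "dci" directory; writing a hexadecimal register number to arb_address and then reading arb_value works the same way and provides arbitrary DCI register access.

	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>

	int main(void)
	{
		char buf[16] = "";
		/* assumed path; adjust to the mdev instance on your system */
		int fd = open("/sys/devices/virtual/most/mostcore/devices/mdev0/dci/node_address",
			      O_RDONLY);

		if (fd < 0)
			return 1;
		if (read(fd, buf, sizeof(buf) - 1) > 0)
			printf("node_address: %s", buf);
		close(fd);
		return 0;
	}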
+
+/**
+ * create_most_dci_obj - allocates a dci object
+ * @parent: parent kobject
+ *
+ * This creates a dci object and registers it with sysfs.
+ * Returns a pointer to the object or NULL when something went wrong.
+ */
+static struct
+most_dci_obj *create_most_dci_obj(struct kobject *parent)
+{
+	struct most_dci_obj *most_dci = kzalloc(sizeof(*most_dci), GFP_KERNEL);
+	int retval;
+
+	if (!most_dci)
+		return NULL;
+
+	retval = kobject_init_and_add(&most_dci->kobj, &most_dci_ktype, parent,
+				      "dci");
+	if (retval) {
+		kobject_put(&most_dci->kobj);
+		return NULL;
+	}
+	return most_dci;
+}
+
+/**
+ * destroy_most_dci_obj - DCI object release function
+ * @p: pointer to dci object
+ */
+static void destroy_most_dci_obj(struct most_dci_obj *p)
+{
+	kobject_put(&p->kobj);
+}
+
+/**
+ * hdm_probe - probe function of USB device driver
+ * @interface: Interface of the attached USB device
+ * @id: Pointer to the USB ID table.
+ *
+ * This allocates and initializes the device instance, adds the new
+ * entry to the internal list, scans the USB descriptors and registers
+ * the interface with the core.
+ * Additionally, the DCI objects are created and the hardware is sync'd.
+ *
+ * Returns 0 on success or a negative error code otherwise.
+ */
+static int
+hdm_probe(struct usb_interface *interface, const struct usb_device_id *id)
+{
+	struct usb_host_interface *usb_iface_desc = interface->cur_altsetting;
+	struct usb_device *usb_dev = interface_to_usbdev(interface);
+	struct device *dev = &usb_dev->dev;
+	struct most_dev *mdev = kzalloc(sizeof(*mdev), GFP_KERNEL);
+	unsigned int i;
+	unsigned int num_endpoints;
+	struct most_channel_capability *tmp_cap;
+	struct usb_endpoint_descriptor *ep_desc;
+	int ret = 0;
+
+	if (!mdev)
+		goto exit_ENOMEM;
+
+	usb_set_intfdata(interface, mdev);
+	num_endpoints = usb_iface_desc->desc.bNumEndpoints;
+	mutex_init(&mdev->io_mutex);
+	INIT_WORK(&mdev->poll_work_obj, wq_netinfo);
+	setup_timer(&mdev->link_stat_timer, link_stat_timer_handler,
+		    (unsigned long)mdev);
+
+	mdev->usb_device = usb_dev;
+	mdev->link_stat_timer.expires = jiffies + (2 * HZ);
+
+	mdev->iface.mod = hdm_usb_fops.owner;
+	mdev->iface.interface = ITYPE_USB;
+	mdev->iface.configure = hdm_configure_channel;
+	mdev->iface.request_netinfo = hdm_request_netinfo;
+	mdev->iface.enqueue = hdm_enqueue;
+	mdev->iface.poison_channel = hdm_poison_channel;
+	mdev->iface.description = mdev->description;
+	mdev->iface.num_channels = num_endpoints;
+
+	snprintf(mdev->description, sizeof(mdev->description),
+		 "usb_device %d-%s:%d.%d",
+		 usb_dev->bus->busnum,
+		 usb_dev->devpath,
+		 usb_dev->config->desc.bConfigurationValue,
+		 usb_iface_desc->desc.bInterfaceNumber);
+
+	mdev->conf = kcalloc(num_endpoints, sizeof(*mdev->conf), GFP_KERNEL);
+	if (!mdev->conf)
+		goto exit_free;
+
+	mdev->cap = kcalloc(num_endpoints, sizeof(*mdev->cap), GFP_KERNEL);
+	if (!mdev->cap)
+		goto exit_free1;
+
+	mdev->iface.channel_vector = mdev->cap;
+	mdev->iface.priv = NULL;
+
+	mdev->ep_address =
+		kcalloc(num_endpoints, sizeof(*mdev->ep_address), GFP_KERNEL);
+	if (!mdev->ep_address)
+		goto exit_free2;
+
+	mdev->busy_urbs =
+		kcalloc(num_endpoints, sizeof(*mdev->busy_urbs), GFP_KERNEL);
+	if (!mdev->busy_urbs)
+		goto exit_free3;
+
+	tmp_cap = mdev->cap;
+	for (i = 0; i < num_endpoints; i++) {
+		ep_desc = &usb_iface_desc->endpoint[i].desc;
+		mdev->ep_address[i] = ep_desc->bEndpointAddress;
+		mdev->padding_active[i] = false;
+		mdev->is_channel_healthy[i] = true;
+
+		snprintf(&mdev->suffix[i][0], MAX_SUFFIX_LEN, "ep%02x",
+			 mdev->ep_address[i]);
+
+		tmp_cap->name_suffix = &mdev->suffix[i][0];
+		tmp_cap->buffer_size_packet = MAX_BUF_SIZE;
+		tmp_cap->buffer_size_streaming = MAX_BUF_SIZE;
+		tmp_cap->num_buffers_packet = BUF_CHAIN_SIZE;
+		tmp_cap->num_buffers_streaming = BUF_CHAIN_SIZE;
+		tmp_cap->data_type = MOST_CH_CONTROL | MOST_CH_ASYNC |
+				     MOST_CH_ISOC | MOST_CH_SYNC;
+		if (usb_endpoint_dir_in(ep_desc))
+			tmp_cap->direction = MOST_CH_RX;
+		else
+			tmp_cap->direction = MOST_CH_TX;
+		tmp_cap++;
+		init_usb_anchor(&mdev->busy_urbs[i]);
+		spin_lock_init(&mdev->channel_lock[i]);
+	}
+	dev_notice(dev, "claimed gadget: Vendor=%4.4x ProdID=%4.4x Bus=%02x Device=%02x\n",
+		   le16_to_cpu(usb_dev->descriptor.idVendor),
+		   le16_to_cpu(usb_dev->descriptor.idProduct),
+		   usb_dev->bus->busnum,
+		   usb_dev->devnum);
+
+	dev_notice(dev, "device path: /sys/bus/usb/devices/%d-%s:%d.%d\n",
+		   usb_dev->bus->busnum,
+		   usb_dev->devpath,
+		   usb_dev->config->desc.bConfigurationValue,
+		   usb_iface_desc->desc.bInterfaceNumber);
+
+	mdev->parent = most_register_interface(&mdev->iface);
+	if (IS_ERR(mdev->parent)) {
+		ret = PTR_ERR(mdev->parent);
+		goto exit_free4;
+	}
+
+	mutex_lock(&mdev->io_mutex);
+	if (le16_to_cpu(usb_dev->descriptor.idProduct) == USB_DEV_ID_OS81118 ||
+	    le16_to_cpu(usb_dev->descriptor.idProduct) == USB_DEV_ID_OS81119 ||
+	    le16_to_cpu(usb_dev->descriptor.idProduct) == USB_DEV_ID_OS81210) {
+		/* this increments the reference count of the instance
+		 * object of the core
+		 */
+		mdev->dci = create_most_dci_obj(mdev->parent);
+		if (!mdev->dci) {
+			mutex_unlock(&mdev->io_mutex);
+			most_deregister_interface(&mdev->iface);
+			ret = -ENOMEM;
+			goto exit_free4;
+		}
+
+		kobject_uevent(&mdev->dci->kobj, KOBJ_ADD);
+		mdev->dci->usb_device = mdev->usb_device;
+	}
+	mutex_unlock(&mdev->io_mutex);
+	return 0;
+
+exit_free4:
+	kfree(mdev->busy_urbs);
+exit_free3:
+	kfree(mdev->ep_address);
+exit_free2:
+	kfree(mdev->cap);
+exit_free1:
+	kfree(mdev->conf);
+exit_free:
+	kfree(mdev);
+exit_ENOMEM:
+	if (ret == 0 || ret == -ENOMEM) {
+		ret = -ENOMEM;
+		dev_err(dev, "out of memory\n");
+	}
+	return ret;
+}
+
+/**
+ * hdm_disconnect - disconnect function of USB device driver
+ * @interface: Interface of the attached USB device
+ *
+ * This deregisters the interface with the core, removes the kernel timer
+ * and frees resources.
+ *
+ * Context: hub kernel thread
+ */
+static void hdm_disconnect(struct usb_interface *interface)
+{
+	struct most_dev *mdev = usb_get_intfdata(interface);
+
+	mutex_lock(&mdev->io_mutex);
+	usb_set_intfdata(interface, NULL);
+	mdev->usb_device = NULL;
+	mutex_unlock(&mdev->io_mutex);
+
+	del_timer_sync(&mdev->link_stat_timer);
+	cancel_work_sync(&mdev->poll_work_obj);
+
+	destroy_most_dci_obj(mdev->dci);
+	most_deregister_interface(&mdev->iface);
+
+	kfree(mdev->busy_urbs);
+	kfree(mdev->cap);
+	kfree(mdev->conf);
+	kfree(mdev->ep_address);
+	kfree(mdev);
+}
+
+static struct usb_driver hdm_usb = {
+	.name = "hdm_usb",
+	.id_table = usbid,
+	.probe = hdm_probe,
+	.disconnect = hdm_disconnect,
+};
+
+module_usb_driver(hdm_usb);
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Christian Gromm <christian.gromm@xxxxxxxxxxxxx>");
+MODULE_DESCRIPTION("HDM_4_USB");
diff --git a/drivers/staging/most/video/Kconfig b/drivers/staging/most/video/Kconfig
new file mode 100644
index 0000000..ce6af4f
--- /dev/null
+++ b/drivers/staging/most/video/Kconfig
@@ -0,0 +1,12 @@
+#
+# MOST V4L2 configuration
+#
+
+config MOST_VIDEO
+	tristate "Video"
+	depends on VIDEO_V4L2
+	---help---
+	  Say Y here if you want to communicate via Video4Linux (V4L2).
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called most_video.
diff --git a/drivers/staging/most/video/Makefile b/drivers/staging/most/video/Makefile
new file mode 100644
index 0000000..1c8e520
--- /dev/null
+++ b/drivers/staging/most/video/Makefile
@@ -0,0 +1,4 @@
+obj-$(CONFIG_MOST_VIDEO) += most_video.o
+
+most_video-objs := video.o
+ccflags-y += -Idrivers/staging/
diff --git a/drivers/staging/most/video/video.c b/drivers/staging/most/video/video.c
new file mode 100644
index 0000000..eed9046
--- /dev/null
+++ b/drivers/staging/most/video/video.c
@@ -0,0 +1,616 @@
+/*
+ * V4L2 AIM - V4L2 Application Interface Module for MostCore
+ *
+ * Copyright (C) 2015, Microchip Technology Germany II GmbH & Co. KG
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * This file is licensed under GPLv2.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/device.h>
+#include <linux/suspend.h>
+#include <linux/videodev2.h>
+#include <linux/mutex.h>
+#include <media/v4l2-common.h>
+#include <media/v4l2-ioctl.h>
+#include <media/v4l2-event.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-fh.h>
+
+#include <most/core.h>
+
+#define V4L2_AIM_MAX_INPUT  1
+
+static struct most_aim aim_info;
+
+struct most_video_dev {
+	struct most_interface *iface;
+	int ch_idx;
+	struct list_head list;
+	bool mute;
+
+	struct list_head pending_mbos;
+	spinlock_t list_lock;
+
+	struct v4l2_device v4l2_dev;
+	atomic_t access_ref;
+	struct video_device *vdev;
+	unsigned int ctrl_input;
+
+	struct mutex lock;
+
+	wait_queue_head_t wait_data;
+};
+
+struct aim_fh {
+	/* must be the first field of this struct! */
+	struct v4l2_fh fh;
+	struct most_video_dev *mdev;
+	u32 offs;
+};
+
+static struct list_head video_devices = LIST_HEAD_INIT(video_devices);
+static struct spinlock list_lock;
+
+static inline bool data_ready(struct most_video_dev *mdev)
+{
+	return !list_empty(&mdev->pending_mbos);
+}
+
+static inline struct mbo *get_top_mbo(struct most_video_dev *mdev)
+{
+	return list_first_entry(&mdev->pending_mbos, struct mbo, list);
+}
+
+static int aim_vdev_open(struct file *filp)
+{
+	int ret;
+	struct video_device *vdev = video_devdata(filp);
+	struct most_video_dev *mdev = video_drvdata(filp);
+	struct aim_fh *fh;
+
+	v4l2_info(&mdev->v4l2_dev, "aim_vdev_open()\n");
+
+	switch (vdev->vfl_type) {
+	case VFL_TYPE_GRABBER:
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	fh = kzalloc(sizeof(*fh), GFP_KERNEL);
+	if (!fh)
+		return -ENOMEM;
+
+	if (!atomic_inc_and_test(&mdev->access_ref)) {
+		v4l2_err(&mdev->v4l2_dev, "too many clients\n");
+		ret = -EBUSY;
+		goto err_dec;
+	}
+
+	fh->mdev = mdev;
+	v4l2_fh_init(&fh->fh, vdev);
+	filp->private_data = fh;
+
+	v4l2_fh_add(&fh->fh);
+
+	ret = most_start_channel(mdev->iface, mdev->ch_idx, &aim_info);
+	if (ret) {
+		v4l2_err(&mdev->v4l2_dev, "most_start_channel() failed\n");
+		goto err_rm;
+	}
+
+	return 0;
+
+err_rm:
+	v4l2_fh_del(&fh->fh);
+	v4l2_fh_exit(&fh->fh);
+
+err_dec:
+	atomic_dec(&mdev->access_ref);
+	kfree(fh);
+	return ret;
+}
+
+static int aim_vdev_close(struct file *filp)
+{
+	struct aim_fh *fh = filp->private_data;
+	struct most_video_dev *mdev = fh->mdev;
+	struct mbo *mbo, *tmp;
+
+	v4l2_info(&mdev->v4l2_dev, "aim_vdev_close()\n");
+
+	/*
+	 * We need to put MBOs back before we call most_stop_channel()
+	 * to deallocate MBOs.
+	 * On the other hand, mostcore keeps calling rx_completion()
+	 * to deliver MBOs until most_stop_channel() is called.
+	 * Use mute to work around this issue.
+	 * This must be implemented in core.
+	 */
+
+	spin_lock_irq(&mdev->list_lock);
+	mdev->mute = true;
+	list_for_each_entry_safe(mbo, tmp, &mdev->pending_mbos, list) {
+		list_del(&mbo->list);
+		spin_unlock_irq(&mdev->list_lock);
+		most_put_mbo(mbo);
+		spin_lock_irq(&mdev->list_lock);
+	}
+	spin_unlock_irq(&mdev->list_lock);
+	most_stop_channel(mdev->iface, mdev->ch_idx, &aim_info);
+	mdev->mute = false;
+
+	v4l2_fh_del(&fh->fh);
+	v4l2_fh_exit(&fh->fh);
+
+	atomic_dec(&mdev->access_ref);
+	kfree(fh);
+	return 0;
+}
+
+static ssize_t aim_vdev_read(struct file *filp, char __user *buf,
+			     size_t count, loff_t *pos)
+{
+	struct aim_fh *fh = filp->private_data;
+	struct most_video_dev *mdev = fh->mdev;
+	int ret = 0;
+
+	if (*pos)
+		return -ESPIPE;
+
+	if (!mdev)
+		return -ENODEV;
+
+	/* wait for the first buffer */
+	if (!(filp->f_flags & O_NONBLOCK)) {
+		if (wait_event_interruptible(mdev->wait_data, data_ready(mdev)))
+			return -ERESTARTSYS;
+	}
+
+	if (!data_ready(mdev))
+		return -EAGAIN;
+
+	while (count > 0 && data_ready(mdev)) {
+		struct mbo *const mbo = get_top_mbo(mdev);
+		int const rem = mbo->processed_length - fh->offs;
+		int const cnt = rem < count ? rem : count;
+
+		if (copy_to_user(buf, mbo->virt_address + fh->offs, cnt)) {
+			v4l2_err(&mdev->v4l2_dev, "read: copy_to_user failed\n");
+			if (!ret)
+				ret = -EFAULT;
+			return ret;
+		}
+
+		fh->offs += cnt;
+		count -= cnt;
+		buf += cnt;
+		ret += cnt;
+
+		if (cnt >= rem) {
+			fh->offs = 0;
+			spin_lock_irq(&mdev->list_lock);
+			list_del(&mbo->list);
+			spin_unlock_irq(&mdev->list_lock);
+			most_put_mbo(mbo);
+		}
+	}
+	return ret;
+}
+
+static unsigned int aim_vdev_poll(struct file *filp, poll_table *wait)
+{
+	struct aim_fh *fh = filp->private_data;
+	struct most_video_dev *mdev = fh->mdev;
+	unsigned int mask = 0;
+
+	/* only wait if no data is available */
+	if (!data_ready(mdev))
+		poll_wait(filp, &mdev->wait_data, wait);
+	if (data_ready(mdev))
+		mask |= POLLIN | POLLRDNORM;
+
+	return mask;
+}
+
+static void aim_set_format_struct(struct v4l2_format *f)
+{
+	f->fmt.pix.width = 8;
+	f->fmt.pix.height = 8;
+	f->fmt.pix.pixelformat = V4L2_PIX_FMT_MPEG;
+	f->fmt.pix.bytesperline = 0;
+	f->fmt.pix.sizeimage = 188 * 2;
+	f->fmt.pix.colorspace = V4L2_COLORSPACE_REC709;
+	f->fmt.pix.field = V4L2_FIELD_NONE;
+	f->fmt.pix.priv = 0;
+}
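(The fixed sizeimage of 188 * 2 corresponds to two MPEG transport stream packets, 188 bytes being the standard TS packet size; the AIM exposes the MOST stream as an opaque V4L2_PIX_FMT_MPEG capture device rather than as raw video.)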
+
+static int aim_set_format(struct most_video_dev *mdev, unsigned int cmd,
+			  struct v4l2_format *format)
+{
+	if (format->fmt.pix.pixelformat != V4L2_PIX_FMT_MPEG)
+		return -EINVAL;
+
+	if (cmd == VIDIOC_TRY_FMT)
+		return 0;
+
+	aim_set_format_struct(format);
+
+	return 0;
+}
+
+static int vidioc_querycap(struct file *file, void *priv,
+			   struct v4l2_capability *cap)
+{
+	struct aim_fh *fh = priv;
+	struct most_video_dev *mdev = fh->mdev;
+
+	v4l2_info(&mdev->v4l2_dev, "vidioc_querycap()\n");
+
+	strlcpy(cap->driver, "v4l2_most_aim", sizeof(cap->driver));
+	strlcpy(cap->card, "MOST", sizeof(cap->card));
+	snprintf(cap->bus_info, sizeof(cap->bus_info),
+		 "%s", mdev->iface->description);
+
+	cap->capabilities =
+		V4L2_CAP_READWRITE |
+		V4L2_CAP_TUNER |
+		V4L2_CAP_VIDEO_CAPTURE;
+	return 0;
+}
+
+static int vidioc_enum_fmt_vid_cap(struct file *file, void *priv,
+				   struct v4l2_fmtdesc *f)
+{
+	struct aim_fh *fh = priv;
+	struct most_video_dev *mdev = fh->mdev;
+
+	v4l2_info(&mdev->v4l2_dev, "vidioc_enum_fmt_vid_cap() %d\n", f->index);
+
+	if (f->index)
+		return -EINVAL;
+
+	strcpy(f->description, "MPEG");
+	f->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+	f->flags = V4L2_FMT_FLAG_COMPRESSED;
+	f->pixelformat = V4L2_PIX_FMT_MPEG;
+
+	return 0;
+}
+
+static int vidioc_g_fmt_vid_cap(struct file *file, void *priv,
+				struct v4l2_format *f)
+{
+	struct aim_fh *fh = priv;
+	struct most_video_dev *mdev = fh->mdev;
+
+	v4l2_info(&mdev->v4l2_dev, "vidioc_g_fmt_vid_cap()\n");
+
+	aim_set_format_struct(f);
+	return 0;
+}
+
+static int vidioc_try_fmt_vid_cap(struct file *file, void *priv,
+				  struct v4l2_format *f)
+{
+	struct aim_fh *fh = priv;
+	struct most_video_dev *mdev = fh->mdev;
+
+	return aim_set_format(mdev, VIDIOC_TRY_FMT, f);
+}
+
+static int vidioc_s_fmt_vid_cap(struct file *file, void *priv,
+				struct v4l2_format *f)
+{
+	struct aim_fh *fh = priv;
+	struct most_video_dev *mdev = fh->mdev;
+
+	return aim_set_format(mdev, VIDIOC_S_FMT, f);
+}
+
+static int vidioc_g_std(struct file *file, void *priv, v4l2_std_id *norm)
+{
+	struct aim_fh *fh = priv;
+	struct most_video_dev *mdev = fh->mdev;
+
+	v4l2_info(&mdev->v4l2_dev, "vidioc_g_std()\n");
+
+	*norm = V4L2_STD_UNKNOWN;
+	return 0;
+}
+
+static int vidioc_enum_input(struct file *file, void *priv,
+			     struct v4l2_input *input)
+{
+	struct aim_fh *fh = priv;
+	struct most_video_dev *mdev = fh->mdev;
+
+	if (input->index >= V4L2_AIM_MAX_INPUT)
+		return -EINVAL;
+
+	strcpy(input->name, "MOST Video");
+	input->type |= V4L2_INPUT_TYPE_CAMERA;
+	input->audioset = 0;
+
+	input->std = mdev->vdev->tvnorms;
+
+	return 0;
+}
+
+static int vidioc_g_input(struct file *file, void *priv, unsigned int *i)
+{
+	struct aim_fh *fh = priv;
+	struct most_video_dev *mdev = fh->mdev;
+	*i = mdev->ctrl_input;
+	return 0;
+}
+
+static int vidioc_s_input(struct file *file, void *priv, unsigned int index)
+{
+	struct aim_fh *fh = priv;
+	struct most_video_dev *mdev = fh->mdev;
+
+	v4l2_info(&mdev->v4l2_dev, "vidioc_s_input(%d)\n", index);
+
+	if (index >= V4L2_AIM_MAX_INPUT)
+		return -EINVAL;
+	mdev->ctrl_input = index;
+	return 0;
+}
+
+static const struct v4l2_file_operations aim_fops = {
+	.owner      = THIS_MODULE,
+	.open       = aim_vdev_open,
+	.release    = aim_vdev_close,
+	.read       = aim_vdev_read,
+	.poll       = aim_vdev_poll,
+	.unlocked_ioctl = video_ioctl2,
+};
+
+static const struct v4l2_ioctl_ops video_ioctl_ops = {
+	.vidioc_querycap            = vidioc_querycap,
+	.vidioc_enum_fmt_vid_cap    = vidioc_enum_fmt_vid_cap,
+	.vidioc_g_fmt_vid_cap       = vidioc_g_fmt_vid_cap,
+	.vidioc_try_fmt_vid_cap     = vidioc_try_fmt_vid_cap,
+	.vidioc_s_fmt_vid_cap       = vidioc_s_fmt_vid_cap,
+	.vidioc_g_std               = vidioc_g_std,
+	.vidioc_enum_input          = vidioc_enum_input,
+	.vidioc_g_input             = vidioc_g_input,
+	.vidioc_s_input             = vidioc_s_input,
+};
+
+static const struct video_device aim_videodev_template = {
+	.fops = &aim_fops,
+	.release = video_device_release,
+	.ioctl_ops = &video_ioctl_ops,
+	.tvnorms = V4L2_STD_UNKNOWN,
+};
+
+/**************************************************************************/
+
+static struct most_video_dev *get_aim_dev(
+	struct most_interface *iface, int channel_idx)
+{
+	struct most_video_dev *mdev;
+	unsigned long flags;
+
+	spin_lock_irqsave(&list_lock, flags);
+	list_for_each_entry(mdev, &video_devices, list) {
+		if (mdev->iface == iface && mdev->ch_idx == channel_idx) {
+			spin_unlock_irqrestore(&list_lock, flags);
+			return mdev;
+		}
+	}
+	spin_unlock_irqrestore(&list_lock, flags);
+	return NULL;
+}
+
+static int aim_rx_data(struct mbo *mbo)
+{
+	unsigned long flags;
+	struct most_video_dev *mdev =
+		get_aim_dev(mbo->ifp, mbo->hdm_channel_id);
+
+	if (!mdev)
+		return -EIO;
+
+	spin_lock_irqsave(&mdev->list_lock, flags);
+	if (unlikely(mdev->mute)) {
+		spin_unlock_irqrestore(&mdev->list_lock, flags);
+		return -EIO;
+	}
+
+	list_add_tail(&mbo->list, &mdev->pending_mbos);
+	spin_unlock_irqrestore(&mdev->list_lock, flags);
+	wake_up_interruptible(&mdev->wait_data);
+	return 0;
+}
+
+static int aim_register_videodev(struct most_video_dev *mdev)
+{
+	int ret;
+
+	v4l2_info(&mdev->v4l2_dev, "aim_register_videodev()\n");
+
+	init_waitqueue_head(&mdev->wait_data);
+
+	/* allocate and fill v4l2 video struct */
+	mdev->vdev = video_device_alloc();
+	if (!mdev->vdev)
+		return -ENOMEM;
+
+	/* Fill the video capture device struct */
+	*mdev->vdev = aim_videodev_template;
+	mdev->vdev->v4l2_dev = &mdev->v4l2_dev;
+	mdev->vdev->lock = &mdev->lock;
+	snprintf(mdev->vdev->name, sizeof(mdev->vdev->name), "MOST: %s",
+		 mdev->v4l2_dev.name);
+
+	/* Register the v4l2 device */
+	video_set_drvdata(mdev->vdev, mdev);
+	ret = video_register_device(mdev->vdev, VFL_TYPE_GRABBER, -1);
+	if (ret) {
+		v4l2_err(&mdev->v4l2_dev, "video_register_device failed (%d)\n",
+			 ret);
+		video_device_release(mdev->vdev);
+	}
+
+	return ret;
+}
+
+static void aim_unregister_videodev(struct most_video_dev *mdev)
+{
+	v4l2_info(&mdev->v4l2_dev, "aim_unregister_videodev()\n");
+
+	video_unregister_device(mdev->vdev);
+}
+
+static void aim_v4l2_dev_release(struct v4l2_device *v4l2_dev)
+{
+	struct most_video_dev *mdev =
+		container_of(v4l2_dev, struct most_video_dev, v4l2_dev);
+
+	v4l2_device_unregister(v4l2_dev);
+	kfree(mdev);
+}
+
+static int aim_probe_channel(struct most_interface *iface, int channel_idx,
+			     struct most_channel_config *ccfg,
+			     struct kobject *parent, char *name)
+{
+	int ret;
+	struct most_video_dev *mdev = get_aim_dev(iface, channel_idx);
+
+	pr_info("aim_probe_channel(%s)\n", name);
+
+	if (mdev) {
+		pr_err("channel already linked\n");
+		return -EEXIST;
+	}
+
+	if (ccfg->direction != MOST_CH_RX) {
+		pr_err("wrong direction, expect rx\n");
+		return -EINVAL;
+	}
+
+	if (ccfg->data_type != MOST_CH_SYNC &&
+	    ccfg->data_type != MOST_CH_ISOC) {
+		pr_err("wrong channel type, expect sync or isoc\n");
+		return -EINVAL;
+	}
+
+	mdev = kzalloc(sizeof(*mdev), GFP_KERNEL);
+	if (!mdev)
+		return -ENOMEM;
+
+	mutex_init(&mdev->lock);
+	atomic_set(&mdev->access_ref, -1);
+	spin_lock_init(&mdev->list_lock);
+	INIT_LIST_HEAD(&mdev->pending_mbos);
+	mdev->iface = iface;
+	mdev->ch_idx = channel_idx;
+	mdev->v4l2_dev.release = aim_v4l2_dev_release;
+
+	/* Create the v4l2_device */
+	strlcpy(mdev->v4l2_dev.name, name, sizeof(mdev->v4l2_dev.name));
+	ret = v4l2_device_register(NULL, &mdev->v4l2_dev);
+	if (ret) {
+		pr_err("v4l2_device_register() failed\n");
+		kfree(mdev);
+		return ret;
+	}
+
+	ret = aim_register_videodev(mdev);
+	if (ret)
+		goto err_unreg;
+
+	spin_lock_irq(&list_lock);
+	list_add(&mdev->list, &video_devices);
+	spin_unlock_irq(&list_lock);
+	v4l2_info(&mdev->v4l2_dev, "aim_probe_channel() done\n");
+	return 0;
+
+err_unreg:
+	v4l2_device_disconnect(&mdev->v4l2_dev);
+	v4l2_device_put(&mdev->v4l2_dev);
+	return ret;
+}
+
+static int aim_disconnect_channel(struct most_interface *iface,
+				  int channel_idx)
+{
+	struct most_video_dev *mdev = get_aim_dev(iface, channel_idx);
+
+	if (!mdev) {
+		pr_err("no such channel is linked\n");
+		return -ENOENT;
+	}
+
+	v4l2_info(&mdev->v4l2_dev, "aim_disconnect_channel()\n");
+
+	spin_lock_irq(&list_lock);
+	list_del(&mdev->list);
+	spin_unlock_irq(&list_lock);
+
+	aim_unregister_videodev(mdev);
+	v4l2_device_disconnect(&mdev->v4l2_dev);
+	v4l2_device_put(&mdev->v4l2_dev);
+	return 0;
+}
+
+static struct most_aim aim_info = {
+	.name = "v4l",
+	.probe_channel = aim_probe_channel,
+	.disconnect_channel = aim_disconnect_channel,
+	.rx_completion = aim_rx_data,
+};
+
+static int __init aim_init(void)
+{
+	spin_lock_init(&list_lock);
+	return most_register_aim(&aim_info);
+}
+
+static void __exit aim_exit(void)
+{
+	struct most_video_dev *mdev, *tmp;
+
+	/*
+	 * As the mostcore currently doesn't call disconnect_channel()
+	 * for linked channels while we call most_deregister_aim()
+	 * we simulate this call here.
+	 * This must be fixed in core.
+	 */
+	spin_lock_irq(&list_lock);
+	list_for_each_entry_safe(mdev, tmp, &video_devices, list) {
+		list_del(&mdev->list);
+		spin_unlock_irq(&list_lock);
+
+		aim_unregister_videodev(mdev);
+		v4l2_device_disconnect(&mdev->v4l2_dev);
+		v4l2_device_put(&mdev->v4l2_dev);
+		spin_lock_irq(&list_lock);
+	}
+	spin_unlock_irq(&list_lock);
+
+	most_deregister_aim(&aim_info);
+	BUG_ON(!list_empty(&video_devices));
+}
+
+module_init(aim_init);
+module_exit(aim_exit);
+
+MODULE_DESCRIPTION("V4L2 Application Interface Module for MostCore");
+MODULE_AUTHOR("Andrey Shvetsov <andrey.shvetsov@xxxxxx>");
+MODULE_LICENSE("GPL");
-- 
1.9.1

_______________________________________________
devel mailing list
devel@xxxxxxxxxxxxxxxxxxxxxx
http://driverdev.linuxdriverproject.org/mailman/listinfo/driverdev-devel



