Merge branch 'p-ti-linux-3.8.y' into p-ti-android-3.8.y
author Praneeth Bajjuri <praneeth@ti.com>
Thu, 13 Jun 2013 23:22:51 +0000 (18:22 -0500)
committer Praneeth Bajjuri <praneeth@ti.com>
Thu, 13 Jun 2013 23:22:51 +0000 (18:22 -0500)
* p-ti-linux-3.8.y: (406 commits)
  ARM: OMAP4+: omap2plus_defconfig: Enable audio via TWL6040 as module
  ASoC: OMAP4+: AESS: aess_mem: Activate AESS for memory/register access
  ARM: dts: OMAP5: AESS: Fix AESS L3 Interconnect address
  ASoC: OMAP: ABE: Pick working ABE support from LDC audio branch
  TI-Integration: ARM: OMAP2+: Fix merge by restoring omap_mcasp_init() call
  omapdss: TFCS panel: Check for successful TLC driver registration before using it
  omapdss: DSS DPLLs: Ignore PLL_PWR_STATUS on DRA7
  ARM: DRA7: dts: Add the sdma dt node and corresponding dma request lines for mmc
  ARM: dra7: dts: Add a fixed regulator node needed for eMMC
  arm/dts: dra7: Add ldo regulator for mmc1
  arm/dts: dra7: Add mmc controller nodes and board data
  ARM: DRA: hwmod: Correct the dma line names for mmc
  arch: arm: configs: Add support for DRA7 evm in omap2plus_defconfig
  arm: dts: dra7-evm: Add pinmux configs needed for display
  HACK: pinctrl: pinctrl single: Make pinctrl-single init early
  OMAPDSS:HDMI: Change PLL calculations
  omapdss: hdmi: fix deepcolor mode configuration
  ARM: dts: DRA7x: Add DMM bindings
  omapdrm: hack: Assign managers/channel to outputs in a more trivial way
  gpu: drm: omap: Use bitmaps for placement
  ...

Signed-off-by: Praneeth Bajjuri <praneeth@ti.com>
252 files changed:
Documentation/android.txt [new file with mode: 0644]
Documentation/cgroups/cgroups.txt
Documentation/cpu-freq/governors.txt
Documentation/sync.txt [new file with mode: 0644]
arch/arm/Kconfig
arch/arm/Kconfig.debug
arch/arm/boot/compressed/head.S
arch/arm/common/Kconfig
arch/arm/common/Makefile
arch/arm/common/fiq_debugger.c [new file with mode: 0644]
arch/arm/common/fiq_debugger_ringbuf.h [new file with mode: 0644]
arch/arm/common/fiq_glue.S [new file with mode: 0644]
arch/arm/common/fiq_glue_setup.c [new file with mode: 0644]
arch/arm/configs/android_omap_defconfig [new file with mode: 0644]
arch/arm/include/asm/cacheflush.h
arch/arm/include/asm/fiq_debugger.h [new file with mode: 0644]
arch/arm/include/asm/fiq_glue.h [new file with mode: 0644]
arch/arm/include/asm/hardirq.h
arch/arm/include/asm/hardware/coresight.h
arch/arm/include/asm/irq.h
arch/arm/include/asm/mach/mmc.h [new file with mode: 0644]
arch/arm/include/asm/rodata.h [new file with mode: 0644]
arch/arm/include/asm/smp.h
arch/arm/kernel/etm.c
arch/arm/kernel/ftrace.c
arch/arm/kernel/process.c
arch/arm/kernel/smp.c
arch/arm/mm/Makefile
arch/arm/mm/cache-v6.S
arch/arm/mm/mmu.c
arch/arm/mm/rodata.c [new file with mode: 0644]
arch/x86/include/asm/idle.h
arch/x86/kernel/process.c
block/genhd.c
block/partition-generic.c
drivers/Kconfig
drivers/Makefile
drivers/base/Kconfig
drivers/base/Makefile
drivers/base/power/main.c
drivers/base/sw_sync.c [new file with mode: 0644]
drivers/base/sync.c [new file with mode: 0644]
drivers/char/Kconfig
drivers/char/Makefile
drivers/char/dcc_tty.c [new file with mode: 0644]
drivers/char/mem.c
drivers/cpufreq/Kconfig
drivers/cpufreq/Makefile
drivers/cpufreq/cpufreq_interactive.c [new file with mode: 0644]
drivers/cpufreq/cpufreq_stats.c
drivers/cpuidle/governors/menu.c
drivers/gpu/Makefile
drivers/gpu/ion/Kconfig [new file with mode: 0644]
drivers/gpu/ion/Makefile [new file with mode: 0644]
drivers/gpu/ion/ion.c [new file with mode: 0644]
drivers/gpu/ion/ion_carveout_heap.c [new file with mode: 0644]
drivers/gpu/ion/ion_chunk_heap.c [new file with mode: 0644]
drivers/gpu/ion/ion_heap.c [new file with mode: 0644]
drivers/gpu/ion/ion_page_pool.c [new file with mode: 0644]
drivers/gpu/ion/ion_priv.h [new file with mode: 0644]
drivers/gpu/ion/ion_system_heap.c [new file with mode: 0644]
drivers/gpu/ion/ion_system_mapper.c [new file with mode: 0644]
drivers/gpu/ion/tegra/Makefile [new file with mode: 0644]
drivers/gpu/ion/tegra/tegra_ion.c [new file with mode: 0644]
drivers/hid/hid-input.c
drivers/hid/hid-magicmouse.c
drivers/hid/hid-multitouch.c
drivers/iio/industrialio-event.c
drivers/input/Kconfig
drivers/input/Makefile
drivers/input/evdev.c
drivers/input/keyreset.c [new file with mode: 0644]
drivers/input/misc/Kconfig
drivers/input/misc/Makefile
drivers/input/misc/gpio_axis.c [new file with mode: 0644]
drivers/input/misc/gpio_event.c [new file with mode: 0644]
drivers/input/misc/gpio_input.c [new file with mode: 0644]
drivers/input/misc/gpio_matrix.c [new file with mode: 0644]
drivers/input/misc/gpio_output.c [new file with mode: 0644]
drivers/input/misc/keychord.c [new file with mode: 0644]
drivers/leds/Kconfig
drivers/leds/Makefile
drivers/leds/ledtrig-sleep.c [new file with mode: 0644]
drivers/misc/Kconfig
drivers/misc/Makefile
drivers/misc/uid_stat.c [new file with mode: 0644]
drivers/mmc/card/Kconfig
drivers/mmc/card/block.c
drivers/mmc/core/Kconfig
drivers/mmc/core/core.c
drivers/mmc/core/host.c
drivers/mmc/core/sd.c
drivers/mmc/core/sdio.c
drivers/mmc/core/sdio_bus.c
drivers/mmc/core/sdio_io.c [changed mode: 0644->0755]
drivers/mtd/nand/Kconfig
drivers/net/ppp/Kconfig
drivers/net/ppp/Makefile
drivers/net/ppp/pppolac.c [new file with mode: 0644]
drivers/net/ppp/pppopns.c [new file with mode: 0644]
drivers/net/tun.c
drivers/net/wireless/Kconfig
drivers/power/Kconfig
drivers/power/Makefile
drivers/power/android_battery.c [new file with mode: 0644]
drivers/power/power_supply_core.c
drivers/power/power_supply_sysfs.c
drivers/staging/android/Kconfig
drivers/staging/android/TODO [deleted file]
drivers/staging/android/ashmem.c
drivers/staging/android/logger.c
drivers/staging/android/logger.h
drivers/staging/android/lowmemorykiller.c
drivers/switch/Kconfig [new file with mode: 0644]
drivers/switch/Makefile [new file with mode: 0644]
drivers/switch/switch_class.c [new file with mode: 0644]
drivers/switch/switch_gpio.c [new file with mode: 0644]
drivers/tty/serial/serial_core.c
drivers/usb/gadget/Kconfig
drivers/usb/gadget/Makefile
drivers/usb/gadget/android.c [new file with mode: 0644]
drivers/usb/gadget/composite.c
drivers/usb/gadget/f_accessory.c [new file with mode: 0644]
drivers/usb/gadget/f_audio_source.c [new file with mode: 0644]
drivers/usb/gadget/f_fs.c
drivers/usb/gadget/f_mtp.c [new file with mode: 0644]
drivers/usb/gadget/f_rndis.c
drivers/usb/gadget/rndis.c
drivers/usb/gadget/u_serial.c
drivers/usb/gadget/udc-core.c
drivers/usb/otg/Kconfig
drivers/usb/otg/Makefile
drivers/usb/otg/otg-wakelock.c [new file with mode: 0644]
drivers/video/Kconfig
drivers/w1/masters/ds2482.c
fs/fat/dir.c
fs/fat/fat.h
fs/fat/inode.c
fs/fs-writeback.c
fs/fuse/dev.c
fs/proc/base.c
fs/pstore/ram.c
fs/pstore/ram_core.c
include/linux/Kbuild [new file with mode: 0644]
include/linux/amba/mmci.h
include/linux/android_aid.h [new file with mode: 0644]
include/linux/cgroup.h
include/linux/cpu.h
include/linux/cpufreq.h
include/linux/gpio_event.h [new file with mode: 0644]
include/linux/hid.h
include/linux/if_pppolac.h [new file with mode: 0644]
include/linux/if_pppopns.h [new file with mode: 0644]
include/linux/if_pppox.h
include/linux/ion.h [new file with mode: 0644]
include/linux/kernel.h
include/linux/keychord.h [new file with mode: 0644]
include/linux/keyreset.h [new file with mode: 0644]
include/linux/mm.h
include/linux/mmc/host.h
include/linux/mmc/pm.h
include/linux/mmc/sdio_func.h [changed mode: 0644->0755]
include/linux/netfilter/xt_qtaguid.h [new file with mode: 0644]
include/linux/netfilter/xt_quota2.h [new file with mode: 0644]
include/linux/nmi.h
include/linux/platform_data/android_battery.h [new file with mode: 0644]
include/linux/platform_data/ds2482.h [new file with mode: 0644]
include/linux/power_supply.h
include/linux/pstore_ram.h
include/linux/sched.h
include/linux/serial_core.h
include/linux/sw_sync.h [new file with mode: 0644]
include/linux/switch.h [new file with mode: 0644]
include/linux/sync.h [new file with mode: 0644]
include/linux/uid_stat.h [new file with mode: 0644]
include/linux/usb/f_accessory.h [new file with mode: 0644]
include/linux/usb/f_mtp.h [new file with mode: 0644]
include/linux/wakelock.h [new file with mode: 0644]
include/linux/wifi_tiwlan.h [new file with mode: 0644]
include/linux/wlan_plat.h [new file with mode: 0644]
include/net/activity_stats.h [new file with mode: 0644]
include/net/bluetooth/hci.h
include/net/bluetooth/hci_core.h
include/net/bluetooth/sco.h
include/net/tcp.h
include/trace/events/cpufreq_interactive.h [new file with mode: 0644]
include/trace/events/power.h
include/trace/events/sync.h [new file with mode: 0644]
include/uapi/linux/if_pppox.h
include/uapi/linux/input.h
include/uapi/linux/msdos_fs.h
include/uapi/linux/netfilter/xt_IDLETIMER.h
include/uapi/linux/netfilter/xt_socket.h
include/uapi/linux/sockios.h
init/Kconfig
kernel/cgroup.c
kernel/cpu.c
kernel/debug/debug_core.c
kernel/debug/kdb/kdb_io.c
kernel/fork.c
kernel/irq/pm.c
kernel/panic.c
kernel/power/Kconfig
kernel/power/Makefile
kernel/power/suspend.c
kernel/power/suspend_time.c [new file with mode: 0644]
kernel/printk.c
kernel/sched/core.c
kernel/sysctl.c
kernel/watchdog.c
lib/Kconfig.debug
mm/page_alloc.c
mm/shmem.c
mm/vmscan.c
net/Kconfig
net/Makefile
net/activity_stats.c [new file with mode: 0644]
net/bluetooth/af_bluetooth.c
net/bluetooth/amp.c
net/bluetooth/hci_conn.c
net/bluetooth/hci_event.c [changed mode: 0644->0755]
net/bluetooth/l2cap_core.c
net/bluetooth/mgmt.c
net/bluetooth/rfcomm/core.c
net/bluetooth/sco.c
net/bridge/br_device.c
net/ipv4/Makefile
net/ipv4/af_inet.c
net/ipv4/devinet.c
net/ipv4/netfilter/Kconfig
net/ipv4/netfilter/ipt_REJECT.c
net/ipv4/sysfs_net_ipv4.c [new file with mode: 0644]
net/ipv4/tcp.c
net/ipv6/af_inet6.c
net/ipv6/exthdrs_core.c
net/ipv6/netfilter/Kconfig
net/ipv6/netfilter/ip6t_REJECT.c
net/netfilter/Kconfig
net/netfilter/Makefile
net/netfilter/xt_IDLETIMER.c
net/netfilter/xt_qtaguid.c [new file with mode: 0644]
net/netfilter/xt_qtaguid_internal.h [new file with mode: 0644]
net/netfilter/xt_qtaguid_print.c [new file with mode: 0644]
net/netfilter/xt_qtaguid_print.h [new file with mode: 0644]
net/netfilter/xt_quota2.c [new file with mode: 0644]
net/netfilter/xt_socket.c
net/rfkill/Kconfig
net/rfkill/core.c
net/wireless/Kconfig
net/wireless/scan.c
net/wireless/sme.c
security/commoncap.c

diff --git a/Documentation/android.txt b/Documentation/android.txt
new file mode 100644 (file)
index 0000000..72a62af
--- /dev/null
@@ -0,0 +1,121 @@
+                               =============
+                               A N D R O I D
+                               =============
+
+Copyright (C) 2009 Google, Inc.
+Written by Mike Chan <mike@android.com>
+
+CONTENTS:
+---------
+
+1. Android
+  1.1 Required enabled config options
+  1.2 Required disabled config options
+  1.3 Recommended enabled config options
+2. Contact
+
+
+1. Android
+==========
+
+Android (www.android.com) is an open source operating system for mobile devices.
+This document describes configurations needed to run the Android framework on
+top of the Linux kernel.
+
+To see a working defconfig, look at msm_defconfig or goldfish_defconfig,
+which can be found at http://android.git.kernel.org in kernel/common.git
+and kernel/msm.git.
+
+
+1.1 Required enabled config options
+-----------------------------------
+After building a standard defconfig, ensure that these options are enabled in
+your .config or defconfig if they are not already. This list is based on the
+msm_defconfig. You should keep the rest of the default options enabled in the
+defconfig unless you know what you are doing.
+
+ANDROID_PARANOID_NETWORK
+ASHMEM
+CONFIG_FB_MODE_HELPERS
+CONFIG_FONT_8x16
+CONFIG_FONT_8x8
+CONFIG_YAFFS_SHORT_NAMES_IN_RAM
+DAB
+EARLYSUSPEND
+FB
+FB_CFB_COPYAREA
+FB_CFB_FILLRECT
+FB_CFB_IMAGEBLIT
+FB_DEFERRED_IO
+FB_TILEBLITTING
+HIGH_RES_TIMERS
+INOTIFY
+INOTIFY_USER
+INPUT_EVDEV
+INPUT_GPIO
+INPUT_MISC
+LEDS_CLASS
+LEDS_GPIO
+LOCK_KERNEL
+LOGGER
+LOW_MEMORY_KILLER
+MISC_DEVICES
+NEW_LEDS
+NO_HZ
+POWER_SUPPLY
+PREEMPT
+RAMFS
+RTC_CLASS
+RTC_LIB
+SWITCH
+SWITCH_GPIO
+TMPFS
+UID_STAT
+UID16
+USB_FUNCTION
+USB_FUNCTION_ADB
+USER_WAKELOCK
+VIDEO_OUTPUT_CONTROL
+WAKELOCK
+YAFFS_AUTO_YAFFS2
+YAFFS_FS
+YAFFS_YAFFS1
+YAFFS_YAFFS2
+
+
+1.2 Required disabled config options
+------------------------------------
+CONFIG_YAFFS_DISABLE_LAZY_LOAD
+DNOTIFY
+
+
+1.3 Recommended enabled config options
+--------------------------------------
+ANDROID_PMEM
+ANDROID_RAM_CONSOLE
+ANDROID_RAM_CONSOLE_ERROR_CORRECTION
+SCHEDSTATS
+DEBUG_PREEMPT
+DEBUG_MUTEXES
+DEBUG_SPINLOCK_SLEEP
+DEBUG_INFO
+FRAME_POINTER
+CPU_FREQ
+CPU_FREQ_TABLE
+CPU_FREQ_DEFAULT_GOV_ONDEMAND
+CPU_FREQ_GOV_ONDEMAND
+CRC_CCITT
+EMBEDDED
+INPUT_TOUCHSCREEN
+I2C
+I2C_BOARDINFO
+LOG_BUF_SHIFT=17
+SERIAL_CORE
+SERIAL_CORE_CONSOLE
+
+
+2. Contact
+==========
+website: http://android.git.kernel.org
+
+mailing-lists: android-kernel@googlegroups.com
index bcf1a00b06a1e7e09042c5dfc354ce3b94e45ae0..1efefbf3074378572e58c7242c4749a9a5ab9abe 100644 (file)
@@ -597,6 +597,15 @@ is completely unused; @cgrp->parent is still valid. (Note - can also
 be called for a newly-created cgroup if an error occurs after this
 subsystem's create() method has been called for the new cgroup).
 
+int allow_attach(struct cgroup *cgrp, struct cgroup_taskset *tset)
+(cgroup_mutex held by caller)
+
+Called prior to moving a task into a cgroup; if the subsystem
+returns an error, this will abort the attach operation.  Used
+to extend the permission checks - if all subsystems in a cgroup
+return 0, the attach will be allowed to proceed, even if the
+default permission check (root or same user) fails.
+
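A minimal sketch of a subsystem using this hook, assuming the 3.8-era
cgroup_subsys layout; the same-UID policy and every name except
allow_attach() are illustrative, not taken from any in-tree controller:

    static int example_allow_attach(struct cgroup *cgrp,
                                    struct cgroup_taskset *tset)
    {
            struct task_struct *task;

            /* Permit the attach only when the writer owns every task
             * being moved; a negative errno vetoes the operation. */
            cgroup_taskset_for_each(task, NULL, tset) {
                    if (!uid_eq(current_uid(), task_uid(task)))
                            return -EACCES;
            }
            return 0;   /* allow, even if the default check failed */
    }

    struct cgroup_subsys example_subsys = {
            .name         = "example",
            .allow_attach = example_allow_attach,
            /* registration boilerplate omitted */
    };
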
 int can_attach(struct cgroup *cgrp, struct cgroup_taskset *tset)
 (cgroup_mutex held by caller)
 
index c7a2eb8450c226b50e55b28f8c6669c6041cc0e3..b4ae5e681a6e57aa7a4b8f65f664d469459c5579 100644 (file)
@@ -28,6 +28,7 @@ Contents:
 2.3  Userspace
 2.4  Ondemand
 2.5  Conservative
+2.6  Interactive
 
 3.   The Governor Interface in the CPUfreq Core
 
@@ -191,6 +192,81 @@ governor but for the opposite direction.  For example when set to its
 default value of '20' it means that the CPU usage needs to be below
 20% between samples to have the frequency decreased.
 
+
+2.6 Interactive
+---------------
+
+The CPUfreq governor "interactive" is designed for latency-sensitive,
+interactive workloads. This governor sets the CPU speed depending on
+usage, similar to "ondemand" and "conservative" governors, but with a
+different set of configurable behaviors.
+
+The tuneable values for this governor are:
+
+target_loads: CPU load values used to adjust speed to influence the
+current CPU load toward those values.  In general, the lower the target
+load, the more often the governor will raise CPU speeds to bring load
+below the target.  The format is a single target load, optionally
+followed by pairs of CPU speeds and CPU loads to target at or above
+those speeds.  Colons can be used between the speeds and associated
+target loads for readability.  For example:
+
+   85 1000000:90 1700000:99
+
+targets a CPU load of 85% below 1GHz, 90% at or above 1GHz but below
+1.7GHz, and 99% at 1.7GHz and above.  If speeds are specified they must
+appear in ascending order.  Higher target load values are typically
+specified for higher speeds; that is, target load values usually also
+appear in ascending order.  The default is a target load of 90% for all
+speeds.
+
+min_sample_time: The minimum amount of time to spend at the current
+frequency before ramping down. Default is 80000 uS.
+
+hispeed_freq: An intermediate "hi speed" at which to initially ramp
+when CPU load hits the value specified in go_hispeed_load.  If load
+stays high for the amount of time specified in above_hispeed_delay,
+then speed may be bumped higher.  Default is the maximum speed
+allowed by the policy at governor initialization time.
+
+go_hispeed_load: The CPU load at which to ramp to hispeed_freq.
+Default is 99%.
+
+above_hispeed_delay: When speed is at or above hispeed_freq, wait for
+this long before raising speed in response to continued high load.
+Default is 20000 uS.
+
+timer_rate: Sample rate for reevaluating CPU load when the CPU is not
+idle.  A deferrable timer is used, such that the CPU will not be woken
+from idle to service this timer until something else needs to run.
+(The maximum time to allow deferring this timer when not running at
+minimum speed is configurable via timer_slack.)  Default is 20000 uS.
+
+timer_slack: Maximum additional time to defer handling the governor
+sampling timer beyond timer_rate when running at speeds above the
+minimum.  For platforms that consume additional power at idle when
+CPUs are running at speeds greater than minimum, this places an upper
+bound on how long the timer will be deferred prior to re-evaluating
+load and dropping speed.  For example, if timer_rate is 20000 uS and
+timer_slack is 10000 uS, then timers will be deferred for up to 30 ms
+when not at the lowest speed.  A value of -1 means defer timers
+indefinitely at all speeds.  Default is 80000 uS.
+
+boost: If non-zero, immediately boost speed of all CPUs to at least
+hispeed_freq until zero is written to this attribute.  If zero, allow
+CPU speeds to drop below hispeed_freq according to load as usual.
+Default is zero.
+
+boostpulse: On each write, immediately boost speed of all CPUs to
+hispeed_freq for at least the period of time specified by
+boostpulse_duration, after which speeds are allowed to drop below
+hispeed_freq according to load as usual.
+
+boostpulse_duration: Length of time to hold CPU speed at hispeed_freq
+on a write to boostpulse, before allowing speed to drop according to
+load as usual.  Default is 80000 uS.
+
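As a hedged user-space illustration (not part of this patch), the
tunables above are conventionally exposed under sysfs while the
governor is active; the exact path is assumed here and can vary by
platform. A minimal C sketch applying the earlier target_loads example:

    #include <stdio.h>

    int main(void)
    {
            /* Assumed sysfs location for interactive tunables;
             * adjust for your platform. */
            FILE *f = fopen("/sys/devices/system/cpu/cpufreq/"
                            "interactive/target_loads", "w");

            if (!f)
                    return 1;
            /* 85% below 1 GHz, 90% from 1 GHz, 99% from 1.7 GHz */
            fprintf(f, "85 1000000:90 1700000:99\n");
            fclose(f);
            return 0;
    }
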
+
 3. The Governor Interface in the CPUfreq Core
 =============================================
 
diff --git a/Documentation/sync.txt b/Documentation/sync.txt
new file mode 100644 (file)
index 0000000..a2d05e7
--- /dev/null
@@ -0,0 +1,75 @@
+Motivation:
+
+In complicated DMA pipelines such as graphics (multimedia, camera, gpu, display),
+a consumer of a buffer needs to know when the producer has finished producing
+it.  Likewise the producer needs to know when the consumer is finished with the
+buffer so it can reuse it.  A particular buffer may be consumed by multiple
+consumers which will retain the buffer for different amounts of time.  In
+addition, a consumer may consume multiple buffers atomically.
+The sync framework adds an API which allows synchronization between the
+producers and consumers in a generic way while also allowing platforms which
+have shared hardware synchronization primitives to exploit them.
+
+Goals:
+       * provide a generic API for expressing synchronization dependencies
+       * allow drivers to exploit hardware synchronization between hardware
+         blocks
+       * provide a userspace API that allows a compositor to manage
+         dependencies.
+       * provide rich telemetry data to allow debugging slowdowns and stalls of
+         the graphics pipeline.
+
+Objects:
+       * sync_timeline
+       * sync_pt
+       * sync_fence
+
+sync_timeline:
+
+A sync_timeline is an abstract monotonically increasing counter. In general,
+each driver/hardware block context will have one of these.  They can be backed
+by the appropriate hardware or rely on the generic sw_sync implementation.
+Timelines are only ever created through their specific implementations
+(e.g. sw_sync).
+
+sync_pt:
+
+A sync_pt is an abstract value which marks a point on a sync_timeline. Sync_pts
+have a single timeline parent.  They have 3 states: active, signaled, and error.
+They start in active state and transition, once, to either signaled (when the
+timeline counter advances beyond the sync_pt’s value) or error state.
+
+sync_fence:
+
+Sync_fences are the primary primitives used by drivers to coordinate
+synchronization of their buffers.  They are a collection of sync_pts which may
+or may not have the same timeline parent.  A sync_pt can only exist in one fence
+and the fence's list of sync_pts is immutable once created.  Fences can be
+waited on synchronously or asynchronously.  Two fences can also be merged to
+create a third fence containing a copy of the two fences’ sync_pts.  Fences are
+backed by file descriptors to allow userspace to coordinate the display pipeline
+dependencies.
+
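To make the objects concrete, a hedged in-kernel sketch using the
sw_sync backend added by this merge (declarations from <linux/sync.h>
and <linux/sw_sync.h>; NULL checks and error cleanup omitted):

    static void sync_example(void)
    {
            /* A timeline, a point that signals at counter value 1,
             * and a fence wrapping that point. */
            struct sw_sync_timeline *tl = sw_sync_timeline_create("example");
            struct sync_pt *pt = sw_sync_pt_create(tl, 1);
            struct sync_fence *fence = sync_fence_create("example_fence", pt);

            sw_sync_timeline_inc(tl, 1);    /* counter hits 1: pt signals */
            sync_fence_put(fence);          /* fence owns and frees the pt */
    }
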
+Use:
+
+A driver implementing sync support should have a work submission function which:
+     * takes a fence argument specifying when to begin work
+     * asynchronously queues that work to kick off when the fence is signaled
+     * returns a fence to indicate when its work will be done
+     * signals the returned fence once the work is completed
+
+Consider an imaginary display driver that has the following API:
+/*
+ * assumes buf is ready to be displayed.
+ * blocks until the buffer is on screen.
+ */
+    void display_buffer(struct dma_buf *buf);
+
+The new API will become:
+/*
+ * will display buf when fence is signaled.
+ * returns immediately with a fence that will signal when buf
+ * is no longer displayed.
+ */
+struct sync_fence* display_buffer(struct dma_buf *buf,
+                                 struct sync_fence *fence);
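
On the consumer side, a hedged sketch of the synchronous path using
sync_fence_wait() from the new sync driver (error handling is
abbreviated and the 1000 ms timeout is arbitrary):

    static int read_buffer_when_ready(struct dma_buf *buf,
                                      struct sync_fence *fence)
    {
            /* Block until every sync_pt in the fence signals, or the
             * timeout (in ms) expires; a negative return is an error. */
            int err = sync_fence_wait(fence, 1000);

            if (err < 0)
                    return err;
            /* ... buf is now safe to read ... */
            sync_fence_put(fence);
            return 0;
    }
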
index 30cd326b5b757866431c7f67ad522bbc5ccd49c2..1def7c896b50902dc72b60cd61706fef9416bad1 100644 (file)
@@ -1876,6 +1876,15 @@ config XEN
        help
          Say Y if you want to run Linux in a Virtual Machine on Xen on ARM.
 
+config ARM_FLUSH_CONSOLE_ON_RESTART
+       bool "Force flush the console on restart"
+       help
+         If the console is locked while the system is rebooted, the messages
+         in the temporary logbuffer will not have propagated to all the
+         console drivers. This option forces the console lock to be
+         released if it cannot be acquired, which causes all the
+         pending messages to be flushed.
+
 endmenu
 
 menu "Boot options"
index 661030d6bc6c3d10f3ca35f6b8125d21adac7389..f14bbd556083ff6f64af84f27cb8259aadcf8a5f 100644 (file)
@@ -63,6 +63,27 @@ config DEBUG_USER
              8 - SIGSEGV faults
             16 - SIGBUS faults
 
+config DEBUG_RODATA
+       bool "Write protect kernel text section"
+       default n
+       depends on DEBUG_KERNEL && MMU
+       ---help---
+         Mark the kernel text section as write-protected in the pagetables,
+         in order to catch accidental (and incorrect) writes to such const
+         data. This will cause the size of the kernel, plus up to 4MB, to
+         be mapped as pages instead of sections, which will increase TLB
+         pressure.
+         If in doubt, say "N".
+
+config DEBUG_RODATA_TEST
+       bool "Testcase for the DEBUG_RODATA feature"
+       depends on DEBUG_RODATA
+       default n
+       ---help---
+         This option enables a testcase for the DEBUG_RODATA
+         feature.
+         If in doubt, say "N".
+
 # These options are only for real kernel hackers who want to get their hands dirty.
 config DEBUG_LL
        bool "Kernel low-level debugging functions (read help!)"
index 3b0b21af089f8a2c86f93134a205b1e7cee7d92a..0ccfd668bd8fd2cde51ad96dc79116e27837142a 100644 (file)
@@ -721,6 +721,7 @@ __armv7_mmu_cache_on:
                bic     r6, r6, #1 << 31        @ 32-bit translation system
                bic     r6, r6, #3 << 0         @ use only ttbr0
                mcrne   p15, 0, r3, c2, c0, 0   @ load page table pointer
+               mcrne   p15, 0, r0, c8, c7, 0   @ flush I,D TLBs
                mcrne   p15, 0, r1, c3, c0, 0   @ load domain access control
                mcrne   p15, 0, r6, c2, c0, 2   @ load ttb control
 #endif
@@ -810,6 +811,8 @@ call_cache_fn:      adr     r12, proc_types
                .align  2
                .type   proc_types,#object
 proc_types:
+#if !defined(CONFIG_CPU_V7)
+               /* This collides with some V7 IDs, preventing correct detection */
                .word   0x00000000              @ old ARM ID
                .word   0x0000f000
                mov     pc, lr
@@ -818,6 +821,7 @@ proc_types:
  THUMB(                nop                             )
                mov     pc, lr
  THUMB(                nop                             )
+#endif
 
                .word   0x41007000              @ ARM7/710
                .word   0xfff8fe00
index 9e32d0d866599e7a6ed5be84f811f9590fbe2608..92efb7a692f1fb77033b2011bf13f35af2516000 100644 (file)
@@ -43,3 +43,53 @@ config SHARP_SCOOP
 
 config TI_PRIV_EDMA
        bool
+
+config FIQ_GLUE
+       bool
+       select FIQ
+
+config FIQ_DEBUGGER
+       bool "FIQ Mode Serial Debugger"
+       select FIQ
+       select FIQ_GLUE
+       default n
+       help
+         The FIQ serial debugger can accept commands even when the
+         kernel is unresponsive due to being stuck with interrupts
+         disabled.
+
+
+config FIQ_DEBUGGER_NO_SLEEP
+       bool "Keep serial debugger active"
+       depends on FIQ_DEBUGGER
+       default n
+       help
+         Enables the serial debugger at boot. Passing
+         fiq_debugger.no_sleep on the kernel command line will
+         override this config option.
+
+config FIQ_DEBUGGER_WAKEUP_IRQ_ALWAYS_ON
+       bool "Don't disable wakeup IRQ when debugger is active"
+       depends on FIQ_DEBUGGER
+       default n
+       help
+         Don't disable the wakeup irq when enabling the uart clock.  This will
+         cause extra interrupts, but it makes the serial debugger usable
+         on some MSM radio builds that ignore the uart clock request in power
+         collapse.
+
+config FIQ_DEBUGGER_CONSOLE
+       bool "Console on FIQ Serial Debugger port"
+       depends on FIQ_DEBUGGER
+       default n
+       help
+         Enables a console so that printk messages are displayed on
+         the debugger serial port as they occur.
+
+config FIQ_DEBUGGER_CONSOLE_DEFAULT_ENABLE
+       bool "Put the FIQ debugger into console mode by default"
+       depends on FIQ_DEBUGGER_CONSOLE
+       default n
+       help
+         If enabled, this puts the fiq debugger into console mode by default.
+         Otherwise, the fiq debugger will start out in debug mode.
index d09a39b1b5d40583106eca52d1f0bdff60b8a6da..4c2748c06d5ab000d08059ec555c302aad3906d2 100644 (file)
@@ -14,3 +14,5 @@ obj-$(CONFIG_SHARP_SCOOP)     += scoop.o
 obj-$(CONFIG_PCI_HOST_ITE8152)  += it8152.o
 obj-$(CONFIG_ARM_TIMER_SP804)  += timer-sp.o
 obj-$(CONFIG_TI_PRIV_EDMA)     += edma.o
+obj-$(CONFIG_FIQ_GLUE)         += fiq_glue.o fiq_glue_setup.o
+obj-$(CONFIG_FIQ_DEBUGGER)     += fiq_debugger.o
diff --git a/arch/arm/common/fiq_debugger.c b/arch/arm/common/fiq_debugger.c
new file mode 100644 (file)
index 0000000..fa58128
--- /dev/null
@@ -0,0 +1,1380 @@
+/*
+ * arch/arm/common/fiq_debugger.c
+ *
+ * Serial Debugger Interface accessed through an FIQ interrupt.
+ *
+ * Copyright (C) 2008 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <stdarg.h>
+#include <linux/module.h>
+#include <linux/io.h>
+#include <linux/console.h>
+#include <linux/interrupt.h>
+#include <linux/clk.h>
+#include <linux/platform_device.h>
+#include <linux/kernel_stat.h>
+#include <linux/kmsg_dump.h>
+#include <linux/irq.h>
+#include <linux/delay.h>
+#include <linux/reboot.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/smp.h>
+#include <linux/timer.h>
+#include <linux/tty.h>
+#include <linux/tty_flip.h>
+#include <linux/wakelock.h>
+
+#include <asm/fiq_debugger.h>
+#include <asm/fiq_glue.h>
+#include <asm/stacktrace.h>
+
+#include <linux/uaccess.h>
+
+#include "fiq_debugger_ringbuf.h"
+
+#define DEBUG_MAX 64
+#define MAX_UNHANDLED_FIQ_COUNT 1000000
+
+#define MAX_FIQ_DEBUGGER_PORTS 4
+
+#define THREAD_INFO(sp) ((struct thread_info *) \
+               ((unsigned long)(sp) & ~(THREAD_SIZE - 1)))
+
+struct fiq_debugger_state {
+       struct fiq_glue_handler handler;
+
+       int fiq;
+       int uart_irq;
+       int signal_irq;
+       int wakeup_irq;
+       bool wakeup_irq_no_set_wake;
+       struct clk *clk;
+       struct fiq_debugger_pdata *pdata;
+       struct platform_device *pdev;
+
+       char debug_cmd[DEBUG_MAX];
+       int debug_busy;
+       int debug_abort;
+
+       char debug_buf[DEBUG_MAX];
+       int debug_count;
+
+       bool no_sleep;
+       bool debug_enable;
+       bool ignore_next_wakeup_irq;
+       struct timer_list sleep_timer;
+       spinlock_t sleep_timer_lock;
+       bool uart_enabled;
+       struct wake_lock debugger_wake_lock;
+       bool console_enable;
+       int current_cpu;
+       atomic_t unhandled_fiq_count;
+       bool in_fiq;
+
+       struct work_struct work;
+       spinlock_t work_lock;
+       char work_cmd[DEBUG_MAX];
+
+#ifdef CONFIG_FIQ_DEBUGGER_CONSOLE
+       spinlock_t console_lock;
+       struct console console;
+       struct tty_port tty_port;
+       struct fiq_debugger_ringbuf *tty_rbuf;
+       bool syslog_dumping;
+#endif
+
+       unsigned int last_irqs[NR_IRQS];
+       unsigned int last_local_timer_irqs[NR_CPUS];
+};
+
+#ifdef CONFIG_FIQ_DEBUGGER_CONSOLE
+struct tty_driver *fiq_tty_driver;
+#endif
+
+#ifdef CONFIG_FIQ_DEBUGGER_NO_SLEEP
+static bool initial_no_sleep = true;
+#else
+static bool initial_no_sleep;
+#endif
+
+#ifdef CONFIG_FIQ_DEBUGGER_CONSOLE_DEFAULT_ENABLE
+static bool initial_debug_enable = true;
+static bool initial_console_enable = true;
+#else
+static bool initial_debug_enable;
+static bool initial_console_enable;
+#endif
+
+static bool fiq_kgdb_enable;
+
+module_param_named(no_sleep, initial_no_sleep, bool, 0644);
+module_param_named(debug_enable, initial_debug_enable, bool, 0644);
+module_param_named(console_enable, initial_console_enable, bool, 0644);
+module_param_named(kgdb_enable, fiq_kgdb_enable, bool, 0644);
+
+#ifdef CONFIG_FIQ_DEBUGGER_WAKEUP_IRQ_ALWAYS_ON
+static inline void enable_wakeup_irq(struct fiq_debugger_state *state) {}
+static inline void disable_wakeup_irq(struct fiq_debugger_state *state) {}
+#else
+static inline void enable_wakeup_irq(struct fiq_debugger_state *state)
+{
+       if (state->wakeup_irq < 0)
+               return;
+       enable_irq(state->wakeup_irq);
+       if (!state->wakeup_irq_no_set_wake)
+               enable_irq_wake(state->wakeup_irq);
+}
+static inline void disable_wakeup_irq(struct fiq_debugger_state *state)
+{
+       if (state->wakeup_irq < 0)
+               return;
+       disable_irq_nosync(state->wakeup_irq);
+       if (!state->wakeup_irq_no_set_wake)
+               disable_irq_wake(state->wakeup_irq);
+}
+#endif
+
+static inline bool debug_have_fiq(struct fiq_debugger_state *state)
+{
+       return (state->fiq >= 0);
+}
+
+static void debug_force_irq(struct fiq_debugger_state *state)
+{
+       unsigned int irq = state->signal_irq;
+
+       if (WARN_ON(!debug_have_fiq(state)))
+               return;
+       if (state->pdata->force_irq) {
+               state->pdata->force_irq(state->pdev, irq);
+       } else {
+               struct irq_chip *chip = irq_get_chip(irq);
+               if (chip && chip->irq_retrigger)
+                       chip->irq_retrigger(irq_get_irq_data(irq));
+       }
+}
+
+static void debug_uart_enable(struct fiq_debugger_state *state)
+{
+       if (state->clk)
+               clk_enable(state->clk);
+       if (state->pdata->uart_enable)
+               state->pdata->uart_enable(state->pdev);
+}
+
+static void debug_uart_disable(struct fiq_debugger_state *state)
+{
+       if (state->pdata->uart_disable)
+               state->pdata->uart_disable(state->pdev);
+       if (state->clk)
+               clk_disable(state->clk);
+}
+
+static void debug_uart_flush(struct fiq_debugger_state *state)
+{
+       if (state->pdata->uart_flush)
+               state->pdata->uart_flush(state->pdev);
+}
+
+static void debug_putc(struct fiq_debugger_state *state, char c)
+{
+       state->pdata->uart_putc(state->pdev, c);
+}
+
+static void debug_puts(struct fiq_debugger_state *state, char *s)
+{
+       unsigned c;
+       while ((c = *s++)) {
+               if (c == '\n')
+                       debug_putc(state, '\r');
+               debug_putc(state, c);
+       }
+}
+
+static void debug_prompt(struct fiq_debugger_state *state)
+{
+       debug_puts(state, "debug> ");
+}
+
+static void dump_kernel_log(struct fiq_debugger_state *state)
+{
+       char buf[512];
+       size_t len;
+       struct kmsg_dumper dumper = { .active = true };
+
+       kmsg_dump_rewind_nolock(&dumper);
+       while (kmsg_dump_get_line_nolock(&dumper, true, buf,
+                                        sizeof(buf) - 1, &len)) {
+               buf[len] = 0;
+               debug_puts(state, buf);
+       }
+}
+
+static char *mode_name(unsigned cpsr)
+{
+       switch (cpsr & MODE_MASK) {
+       case USR_MODE: return "USR";
+       case FIQ_MODE: return "FIQ";
+       case IRQ_MODE: return "IRQ";
+       case SVC_MODE: return "SVC";
+       case ABT_MODE: return "ABT";
+       case UND_MODE: return "UND";
+       case SYSTEM_MODE: return "SYS";
+       default: return "???";
+       }
+}
+
+static int debug_printf(void *cookie, const char *fmt, ...)
+{
+       struct fiq_debugger_state *state = cookie;
+       char buf[256];
+       va_list ap;
+
+       va_start(ap, fmt);
+       vsnprintf(buf, sizeof(buf), fmt, ap);
+       va_end(ap);
+
+       debug_puts(state, buf);
+       return state->debug_abort;
+}
+
+/* Safe outside fiq context */
+static int debug_printf_nfiq(void *cookie, const char *fmt, ...)
+{
+       struct fiq_debugger_state *state = cookie;
+       char buf[256];
+       va_list ap;
+       unsigned long irq_flags;
+
+       va_start(ap, fmt);
+       vsnprintf(buf, sizeof(buf), fmt, ap);
+       va_end(ap);
+
+       local_irq_save(irq_flags);
+       debug_puts(state, buf);
+       debug_uart_flush(state);
+       local_irq_restore(irq_flags);
+       return state->debug_abort;
+}
+
+static void dump_regs(struct fiq_debugger_state *state, unsigned *regs)
+{
+       debug_printf(state, " r0 %08x  r1 %08x  r2 %08x  r3 %08x\n",
+                       regs[0], regs[1], regs[2], regs[3]);
+       debug_printf(state, " r4 %08x  r5 %08x  r6 %08x  r7 %08x\n",
+                       regs[4], regs[5], regs[6], regs[7]);
+       debug_printf(state, " r8 %08x  r9 %08x r10 %08x r11 %08x  mode %s\n",
+                       regs[8], regs[9], regs[10], regs[11],
+                       mode_name(regs[16]));
+       if ((regs[16] & MODE_MASK) == USR_MODE)
+               debug_printf(state, " ip %08x  sp %08x  lr %08x  pc %08x  "
+                               "cpsr %08x\n", regs[12], regs[13], regs[14],
+                               regs[15], regs[16]);
+       else
+               debug_printf(state, " ip %08x  sp %08x  lr %08x  pc %08x  "
+                               "cpsr %08x  spsr %08x\n", regs[12], regs[13],
+                               regs[14], regs[15], regs[16], regs[17]);
+}
+
+struct mode_regs {
+       unsigned long sp_svc;
+       unsigned long lr_svc;
+       unsigned long spsr_svc;
+
+       unsigned long sp_abt;
+       unsigned long lr_abt;
+       unsigned long spsr_abt;
+
+       unsigned long sp_und;
+       unsigned long lr_und;
+       unsigned long spsr_und;
+
+       unsigned long sp_irq;
+       unsigned long lr_irq;
+       unsigned long spsr_irq;
+
+       unsigned long r8_fiq;
+       unsigned long r9_fiq;
+       unsigned long r10_fiq;
+       unsigned long r11_fiq;
+       unsigned long r12_fiq;
+       unsigned long sp_fiq;
+       unsigned long lr_fiq;
+       unsigned long spsr_fiq;
+};
+
+void __naked get_mode_regs(struct mode_regs *regs)
+{
+       asm volatile (
+       "mrs    r1, cpsr\n"
+       "msr    cpsr_c, #0xd3 @(SVC_MODE | PSR_I_BIT | PSR_F_BIT)\n"
+       "stmia  r0!, {r13 - r14}\n"
+       "mrs    r2, spsr\n"
+       "msr    cpsr_c, #0xd7 @(ABT_MODE | PSR_I_BIT | PSR_F_BIT)\n"
+       "stmia  r0!, {r2, r13 - r14}\n"
+       "mrs    r2, spsr\n"
+       "msr    cpsr_c, #0xdb @(UND_MODE | PSR_I_BIT | PSR_F_BIT)\n"
+       "stmia  r0!, {r2, r13 - r14}\n"
+       "mrs    r2, spsr\n"
+       "msr    cpsr_c, #0xd2 @(IRQ_MODE | PSR_I_BIT | PSR_F_BIT)\n"
+       "stmia  r0!, {r2, r13 - r14}\n"
+       "mrs    r2, spsr\n"
+       "msr    cpsr_c, #0xd1 @(FIQ_MODE | PSR_I_BIT | PSR_F_BIT)\n"
+       "stmia  r0!, {r2, r8 - r14}\n"
+       "mrs    r2, spsr\n"
+       "stmia  r0!, {r2}\n"
+       "msr    cpsr_c, r1\n"
+       "bx     lr\n");
+}
+
+
+static void dump_allregs(struct fiq_debugger_state *state, unsigned *regs)
+{
+       struct mode_regs mode_regs;
+       dump_regs(state, regs);
+       get_mode_regs(&mode_regs);
+       debug_printf(state, " svc: sp %08x  lr %08x  spsr %08x\n",
+                       mode_regs.sp_svc, mode_regs.lr_svc, mode_regs.spsr_svc);
+       debug_printf(state, " abt: sp %08x  lr %08x  spsr %08x\n",
+                       mode_regs.sp_abt, mode_regs.lr_abt, mode_regs.spsr_abt);
+       debug_printf(state, " und: sp %08x  lr %08x  spsr %08x\n",
+                       mode_regs.sp_und, mode_regs.lr_und, mode_regs.spsr_und);
+       debug_printf(state, " irq: sp %08x  lr %08x  spsr %08x\n",
+                       mode_regs.sp_irq, mode_regs.lr_irq, mode_regs.spsr_irq);
+       debug_printf(state, " fiq: r8 %08x  r9 %08x  r10 %08x  r11 %08x  "
+                       "r12 %08x\n",
+                       mode_regs.r8_fiq, mode_regs.r9_fiq, mode_regs.r10_fiq,
+                       mode_regs.r11_fiq, mode_regs.r12_fiq);
+       debug_printf(state, " fiq: sp %08x  lr %08x  spsr %08x\n",
+                       mode_regs.sp_fiq, mode_regs.lr_fiq, mode_regs.spsr_fiq);
+}
+
+static void dump_irqs(struct fiq_debugger_state *state)
+{
+       int n;
+       struct irq_desc *desc;
+
+       debug_printf(state, "irqnr       total  since-last   status  name\n");
+       for_each_irq_desc(n, desc) {
+               struct irqaction *act = desc->action;
+               if (!act && !kstat_irqs(n))
+                       continue;
+               debug_printf(state, "%5d: %10u %11u %8x  %s\n", n,
+                       kstat_irqs(n),
+                       kstat_irqs(n) - state->last_irqs[n],
+                       desc->status_use_accessors,
+                       (act && act->name) ? act->name : "???");
+               state->last_irqs[n] = kstat_irqs(n);
+       }
+}
+
+struct stacktrace_state {
+       struct fiq_debugger_state *state;
+       unsigned int depth;
+};
+
+static int report_trace(struct stackframe *frame, void *d)
+{
+       struct stacktrace_state *sts = d;
+
+       if (sts->depth) {
+               debug_printf(sts->state,
+                       "  pc: %p (%pF), lr %p (%pF), sp %p, fp %p\n",
+                       frame->pc, frame->pc, frame->lr, frame->lr,
+                       frame->sp, frame->fp);
+               sts->depth--;
+               return 0;
+       }
+       debug_printf(sts->state, "  ...\n");
+
+       return sts->depth == 0;
+}
+
+struct frame_tail {
+       struct frame_tail *fp;
+       unsigned long sp;
+       unsigned long lr;
+} __attribute__((packed));
+
+static struct frame_tail *user_backtrace(struct fiq_debugger_state *state,
+                                       struct frame_tail *tail)
+{
+       struct frame_tail buftail[2];
+
+       /* Also check accessibility of one struct frame_tail beyond */
+       if (!access_ok(VERIFY_READ, tail, sizeof(buftail))) {
+               debug_printf(state, "  invalid frame pointer %p\n", tail);
+               return NULL;
+       }
+       if (__copy_from_user_inatomic(buftail, tail, sizeof(buftail))) {
+               debug_printf(state,
+                       "  failed to copy frame pointer %p\n", tail);
+               return NULL;
+       }
+
+       debug_printf(state, "  %p\n", buftail[0].lr);
+
+       /* frame pointers should strictly progress back up the stack
+        * (towards higher addresses) */
+       if (tail >= buftail[0].fp)
+               return NULL;
+
+       return buftail[0].fp-1;
+}
+
+void dump_stacktrace(struct fiq_debugger_state *state,
+               struct pt_regs * const regs, unsigned int depth, void *ssp)
+{
+       struct frame_tail *tail;
+       struct thread_info *real_thread_info = THREAD_INFO(ssp);
+       struct stacktrace_state sts;
+
+       sts.depth = depth;
+       sts.state = state;
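+       /* Adopt the interrupted context's thread_info so that current
+        * and the stack walk below refer to the pre-FIQ task. */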
+       *current_thread_info() = *real_thread_info;
+
+       if (!current)
+               debug_printf(state, "current NULL\n");
+       else
+               debug_printf(state, "pid: %d  comm: %s\n",
+                       current->pid, current->comm);
+       dump_regs(state, (unsigned *)regs);
+
+       if (!user_mode(regs)) {
+               struct stackframe frame;
+               frame.fp = regs->ARM_fp;
+               frame.sp = regs->ARM_sp;
+               frame.lr = regs->ARM_lr;
+               frame.pc = regs->ARM_pc;
+               debug_printf(state,
+                       "  pc: %p (%pF), lr %p (%pF), sp %p, fp %p\n",
+                       regs->ARM_pc, regs->ARM_pc, regs->ARM_lr, regs->ARM_lr,
+                       regs->ARM_sp, regs->ARM_fp);
+               walk_stackframe(&frame, report_trace, &sts);
+               return;
+       }
+
+       tail = ((struct frame_tail *) regs->ARM_fp) - 1;
+       while (depth-- && tail && !((unsigned long) tail & 3))
+               tail = user_backtrace(state, tail);
+}
+
+static void do_ps(struct fiq_debugger_state *state)
+{
+       struct task_struct *g;
+       struct task_struct *p;
+       unsigned task_state;
+       static const char stat_nam[] = "RSDTtZX";
+
+       debug_printf(state, "pid   ppid  prio task            pc\n");
+       read_lock(&tasklist_lock);
+       do_each_thread(g, p) {
+               task_state = p->state ? __ffs(p->state) + 1 : 0;
+               debug_printf(state,
+                            "%5d %5d %4d ", p->pid, p->parent->pid, p->prio);
+               debug_printf(state, "%-13.13s %c", p->comm,
+                            task_state >= sizeof(stat_nam) ? '?' : stat_nam[task_state]);
+               if (task_state == TASK_RUNNING)
+                       debug_printf(state, " running\n");
+               else
+                       debug_printf(state, " %08lx\n", thread_saved_pc(p));
+       } while_each_thread(g, p);
+       read_unlock(&tasklist_lock);
+}
+
+#ifdef CONFIG_FIQ_DEBUGGER_CONSOLE
+static void begin_syslog_dump(struct fiq_debugger_state *state)
+{
+       state->syslog_dumping = true;
+}
+
+static void end_syslog_dump(struct fiq_debugger_state *state)
+{
+       state->syslog_dumping = false;
+}
+#else
+extern int do_syslog(int type, char __user *buf, int count);
+static void begin_syslog_dump(struct fiq_debugger_state *state)
+{
+       do_syslog(5 /* clear */, NULL, 0);
+}
+
+static void end_syslog_dump(struct fiq_debugger_state *state)
+{
+       dump_kernel_log(state);
+}
+#endif
+
+static void do_sysrq(struct fiq_debugger_state *state, char rq)
+{
+       if ((rq == 'g' || rq == 'G') && !fiq_kgdb_enable) {
+               debug_printf(state, "sysrq-g blocked\n");
+               return;
+       }
+       begin_syslog_dump(state);
+       handle_sysrq(rq);
+       end_syslog_dump(state);
+}
+
+#ifdef CONFIG_KGDB
+static void do_kgdb(struct fiq_debugger_state *state)
+{
+       if (!fiq_kgdb_enable) {
+               debug_printf(state, "kgdb through fiq debugger not enabled\n");
+               return;
+       }
+
+       debug_printf(state, "enabling console and triggering kgdb\n");
+       state->console_enable = true;
+       handle_sysrq('g');
+}
+#endif
+
+static void debug_schedule_work(struct fiq_debugger_state *state, char *cmd)
+{
+       unsigned long flags;
+
+       spin_lock_irqsave(&state->work_lock, flags);
+       if (state->work_cmd[0] != '\0') {
+               debug_printf(state, "work command processor busy\n");
+               spin_unlock_irqrestore(&state->work_lock, flags);
+               return;
+       }
+
+       strlcpy(state->work_cmd, cmd, sizeof(state->work_cmd));
+       spin_unlock_irqrestore(&state->work_lock, flags);
+
+       schedule_work(&state->work);
+}
+
+static void debug_work(struct work_struct *work)
+{
+       struct fiq_debugger_state *state;
+       char work_cmd[DEBUG_MAX];
+       char *cmd;
+       unsigned long flags;
+
+       state = container_of(work, struct fiq_debugger_state, work);
+
+       spin_lock_irqsave(&state->work_lock, flags);
+
+       strlcpy(work_cmd, state->work_cmd, sizeof(work_cmd));
+       state->work_cmd[0] = '\0';
+
+       spin_unlock_irqrestore(&state->work_lock, flags);
+
+       cmd = work_cmd;
+       if (!strncmp(cmd, "reboot", 6)) {
+               cmd += 6;
+               while (*cmd == ' ')
+                       cmd++;
+               if (*cmd != '\0')
+                       kernel_restart(cmd);
+               else
+                       kernel_restart(NULL);
+       } else {
+               debug_printf(state, "unknown work command '%s'\n", work_cmd);
+       }
+}
+
+/* This function CANNOT be called in FIQ context */
+static void debug_irq_exec(struct fiq_debugger_state *state, char *cmd)
+{
+       if (!strcmp(cmd, "ps"))
+               do_ps(state);
+       if (!strcmp(cmd, "sysrq"))
+               do_sysrq(state, 'h');
+       if (!strncmp(cmd, "sysrq ", 6))
+               do_sysrq(state, cmd[6]);
+#ifdef CONFIG_KGDB
+       if (!strcmp(cmd, "kgdb"))
+               do_kgdb(state);
+#endif
+       if (!strncmp(cmd, "reboot", 6))
+               debug_schedule_work(state, cmd);
+}
+
+static void debug_help(struct fiq_debugger_state *state)
+{
+       debug_printf(state,     "FIQ Debugger commands:\n"
+                               " pc            PC status\n"
+                               " regs          Register dump\n"
+                               " allregs       Extended Register dump\n"
+                               " bt            Stack trace\n"
+                               " reboot [<c>]  Reboot with command <c>\n"
+                               " reset [<c>]   Hard reset with command <c>\n"
+                               " irqs          Interrupt status\n"
+                               " kmsg          Kernel log\n"
+                               " version       Kernel version\n");
+       debug_printf(state,     " sleep         Allow sleep while in FIQ\n"
+                               " nosleep       Disable sleep while in FIQ\n"
+                               " console       Switch terminal to console\n"
+                               " cpu           Current CPU\n"
+                               " cpu <number>  Switch to CPU<number>\n");
+       debug_printf(state,     " ps            Process list\n"
+                               " sysrq         sysrq options\n"
+                               " sysrq <param> Execute sysrq with <param>\n");
+#ifdef CONFIG_KGDB
+       debug_printf(state,     " kgdb          Enter kernel debugger\n");
+#endif
+}
+
+static void take_affinity(void *info)
+{
+       struct fiq_debugger_state *state = info;
+       struct cpumask cpumask;
+
+       cpumask_clear(&cpumask);
+       cpumask_set_cpu(get_cpu(), &cpumask);
+
+       irq_set_affinity(state->uart_irq, &cpumask);
+}
+
+static void switch_cpu(struct fiq_debugger_state *state, int cpu)
+{
+       if (!debug_have_fiq(state))
+               smp_call_function_single(cpu, take_affinity, state, false);
+       state->current_cpu = cpu;
+}
+
+static bool debug_fiq_exec(struct fiq_debugger_state *state,
+                       const char *cmd, unsigned *regs, void *svc_sp)
+{
+       bool signal_helper = false;
+
+       if (!strcmp(cmd, "help") || !strcmp(cmd, "?")) {
+               debug_help(state);
+       } else if (!strcmp(cmd, "pc")) {
+               debug_printf(state, " pc %08x cpsr %08x mode %s\n",
+                       regs[15], regs[16], mode_name(regs[16]));
+       } else if (!strcmp(cmd, "regs")) {
+               dump_regs(state, regs);
+       } else if (!strcmp(cmd, "allregs")) {
+               dump_allregs(state, regs);
+       } else if (!strcmp(cmd, "bt")) {
+               dump_stacktrace(state, (struct pt_regs *)regs, 100, svc_sp);
+       } else if (!strncmp(cmd, "reset", 5)) {
+               cmd += 5;
+               while (*cmd == ' ')
+                       cmd++;
+               if (*cmd) {
+                       char tmp_cmd[32];
+                       strlcpy(tmp_cmd, cmd, sizeof(tmp_cmd));
+                       machine_restart(tmp_cmd);
+               } else {
+                       machine_restart(NULL);
+               }
+       } else if (!strcmp(cmd, "irqs")) {
+               dump_irqs(state);
+       } else if (!strcmp(cmd, "kmsg")) {
+               dump_kernel_log(state);
+       } else if (!strcmp(cmd, "version")) {
+               debug_printf(state, "%s\n", linux_banner);
+       } else if (!strcmp(cmd, "sleep")) {
+               state->no_sleep = false;
+               debug_printf(state, "enabling sleep\n");
+       } else if (!strcmp(cmd, "nosleep")) {
+               state->no_sleep = true;
+               debug_printf(state, "disabling sleep\n");
+       } else if (!strcmp(cmd, "console")) {
+               debug_printf(state, "console mode\n");
+               debug_uart_flush(state);
+               state->console_enable = true;
+       } else if (!strcmp(cmd, "cpu")) {
+               debug_printf(state, "cpu %d\n", state->current_cpu);
+       } else if (!strncmp(cmd, "cpu ", 4)) {
+               unsigned long cpu = 0;
+               if (strict_strtoul(cmd + 4, 10, &cpu) == 0)
+                       switch_cpu(state, cpu);
+               else
+                       debug_printf(state, "invalid cpu\n");
+               debug_printf(state, "cpu %d\n", state->current_cpu);
+       } else {
+               if (state->debug_busy) {
+                       debug_printf(state,
+                               "command processor busy. trying to abort.\n");
+                       state->debug_abort = -1;
+               } else {
+                       strcpy(state->debug_cmd, cmd);
+                       state->debug_busy = 1;
+               }
+
+               return true;
+       }
+       if (!state->console_enable)
+               debug_prompt(state);
+
+       return signal_helper;
+}
+
+static void sleep_timer_expired(unsigned long data)
+{
+       struct fiq_debugger_state *state = (struct fiq_debugger_state *)data;
+       unsigned long flags;
+
+       spin_lock_irqsave(&state->sleep_timer_lock, flags);
+       if (state->uart_enabled && !state->no_sleep) {
+               if (state->debug_enable && !state->console_enable) {
+                       state->debug_enable = false;
+                       debug_printf_nfiq(state, "suspending fiq debugger\n");
+               }
+               state->ignore_next_wakeup_irq = true;
+               debug_uart_disable(state);
+               state->uart_enabled = false;
+               enable_wakeup_irq(state);
+       }
+       wake_unlock(&state->debugger_wake_lock);
+       spin_unlock_irqrestore(&state->sleep_timer_lock, flags);
+}
+
+static void handle_wakeup(struct fiq_debugger_state *state)
+{
+       unsigned long flags;
+
+       spin_lock_irqsave(&state->sleep_timer_lock, flags);
+       if (state->wakeup_irq >= 0 && state->ignore_next_wakeup_irq) {
+               state->ignore_next_wakeup_irq = false;
+       } else if (!state->uart_enabled) {
+               wake_lock(&state->debugger_wake_lock);
+               debug_uart_enable(state);
+               state->uart_enabled = true;
+               disable_wakeup_irq(state);
+               mod_timer(&state->sleep_timer, jiffies + HZ / 2);
+       }
+       spin_unlock_irqrestore(&state->sleep_timer_lock, flags);
+}
+
+static irqreturn_t wakeup_irq_handler(int irq, void *dev)
+{
+       struct fiq_debugger_state *state = dev;
+
+       if (!state->no_sleep)
+               debug_puts(state, "WAKEUP\n");
+       handle_wakeup(state);
+
+       return IRQ_HANDLED;
+}
+
+static void debug_handle_console_irq_context(struct fiq_debugger_state *state)
+{
+#if defined(CONFIG_FIQ_DEBUGGER_CONSOLE)
+       struct tty_struct *tty;
+
+       tty = tty_port_tty_get(&state->tty_port);
+       if (tty) {
+               int i;
+               int count = fiq_debugger_ringbuf_level(state->tty_rbuf);
+               for (i = 0; i < count; i++) {
+                       int c = fiq_debugger_ringbuf_peek(state->tty_rbuf, 0);
+                       tty_insert_flip_char(tty, c, TTY_NORMAL);
+                       if (!fiq_debugger_ringbuf_consume(state->tty_rbuf, 1))
+                               pr_warn("fiq tty failed to consume byte\n");
+               }
+               tty_flip_buffer_push(tty);
+               tty_kref_put(tty);
+       }
+#endif
+}
+
+static void debug_handle_irq_context(struct fiq_debugger_state *state)
+{
+       if (!state->no_sleep) {
+               unsigned long flags;
+
+               spin_lock_irqsave(&state->sleep_timer_lock, flags);
+               wake_lock(&state->debugger_wake_lock);
+               mod_timer(&state->sleep_timer, jiffies + HZ * 5);
+               spin_unlock_irqrestore(&state->sleep_timer_lock, flags);
+       }
+       debug_handle_console_irq_context(state);
+       if (state->debug_busy) {
+               debug_irq_exec(state, state->debug_cmd);
+               if (!state->console_enable)
+                       debug_prompt(state);
+               state->debug_busy = 0;
+       }
+}
+
+static int debug_getc(struct fiq_debugger_state *state)
+{
+       return state->pdata->uart_getc(state->pdev);
+}
+
+static bool debug_handle_uart_interrupt(struct fiq_debugger_state *state,
+                       int this_cpu, void *regs, void *svc_sp)
+{
+       int c;
+       static int last_c;
+       int count = 0;
+       bool signal_helper = false;
+
+       if (this_cpu != state->current_cpu) {
+               if (state->in_fiq)
+                       return false;
+
+               if (atomic_inc_return(&state->unhandled_fiq_count) !=
+                                       MAX_UNHANDLED_FIQ_COUNT)
+                       return false;
+
+               debug_printf(state, "fiq_debugger: cpu %d not responding, "
+                       "reverting to cpu %d\n", state->current_cpu,
+                       this_cpu);
+
+               atomic_set(&state->unhandled_fiq_count, 0);
+               switch_cpu(state, this_cpu);
+               return false;
+       }
+
+       state->in_fiq = true;
+
+       while ((c = debug_getc(state)) != FIQ_DEBUGGER_NO_CHAR) {
+               count++;
+               if (!state->debug_enable) {
+                       if ((c == 13) || (c == 10)) {
+                               state->debug_enable = true;
+                               state->debug_count = 0;
+                               debug_prompt(state);
+                       }
+               } else if (c == FIQ_DEBUGGER_BREAK) {
+                       state->console_enable = false;
+                       debug_puts(state, "fiq debugger mode\n");
+                       state->debug_count = 0;
+                       debug_prompt(state);
+#ifdef CONFIG_FIQ_DEBUGGER_CONSOLE
+               } else if (state->console_enable && state->tty_rbuf) {
+                       fiq_debugger_ringbuf_push(state->tty_rbuf, c);
+                       signal_helper = true;
+#endif
+               } else if ((c >= ' ') && (c < 127)) {
+                       if (state->debug_count < (DEBUG_MAX - 1)) {
+                               state->debug_buf[state->debug_count++] = c;
+                               debug_putc(state, c);
+                       }
+               } else if ((c == 8) || (c == 127)) {
+                       if (state->debug_count > 0) {
+                               state->debug_count--;
+                               debug_putc(state, 8);
+                               debug_putc(state, ' ');
+                               debug_putc(state, 8);
+                       }
+               } else if ((c == 13) || (c == 10)) {
+                       if (c == '\r' || (c == '\n' && last_c != '\r')) {
+                               debug_putc(state, '\r');
+                               debug_putc(state, '\n');
+                       }
+                       if (state->debug_count) {
+                               state->debug_buf[state->debug_count] = 0;
+                               state->debug_count = 0;
+                               signal_helper |=
+                                       debug_fiq_exec(state, state->debug_buf,
+                                                      regs, svc_sp);
+                       } else {
+                               debug_prompt(state);
+                       }
+               }
+               last_c = c;
+       }
+       if (!state->console_enable)
+               debug_uart_flush(state);
+       if (state->pdata->fiq_ack)
+               state->pdata->fiq_ack(state->pdev, state->fiq);
+
+       /* poke sleep timer if necessary */
+       if (state->debug_enable && !state->no_sleep)
+               signal_helper = true;
+
+       atomic_set(&state->unhandled_fiq_count, 0);
+       state->in_fiq = false;
+
+       return signal_helper;
+}
+
+static void debug_fiq(struct fiq_glue_handler *h, void *regs, void *svc_sp)
+{
+       struct fiq_debugger_state *state =
+               container_of(h, struct fiq_debugger_state, handler);
+       unsigned int this_cpu = THREAD_INFO(svc_sp)->cpu;
+       bool need_irq;
+
+       need_irq = debug_handle_uart_interrupt(state, this_cpu, regs, svc_sp);
+       if (need_irq)
+               debug_force_irq(state);
+}
+
+/*
+ * When FIQs are not used, this single interrupt is the only entry point.
+ * It effectively takes over the UART interrupt and does all of the work
+ * in this context.
+ */
+static irqreturn_t debug_uart_irq(int irq, void *dev)
+{
+       struct fiq_debugger_state *state = dev;
+       bool not_done;
+
+       handle_wakeup(state);
+
+       /* handle the debugger irq in regular context */
+       not_done = debug_handle_uart_interrupt(state, smp_processor_id(),
+                                             get_irq_regs(),
+                                             current_thread_info());
+       if (not_done)
+               debug_handle_irq_context(state);
+
+       return IRQ_HANDLED;
+}
+
+/*
+ * If FIQs are used, not everything can happen in fiq context.
+ * The FIQ handler does what it can and then signals this interrupt to
+ * finish the job in irq context.
+ */
+static irqreturn_t debug_signal_irq(int irq, void *dev)
+{
+       struct fiq_debugger_state *state = dev;
+
+       if (state->pdata->force_irq_ack)
+               state->pdata->force_irq_ack(state->pdev, state->signal_irq);
+
+       debug_handle_irq_context(state);
+
+       return IRQ_HANDLED;
+}
+
+static void debug_resume(struct fiq_glue_handler *h)
+{
+       struct fiq_debugger_state *state =
+               container_of(h, struct fiq_debugger_state, handler);
+       if (state->pdata->uart_resume)
+               state->pdata->uart_resume(state->pdev);
+}
+
+#if defined(CONFIG_FIQ_DEBUGGER_CONSOLE)
+struct tty_driver *debug_console_device(struct console *co, int *index)
+{
+       *index = co->index;
+       return fiq_tty_driver;
+}
+
+static void debug_console_write(struct console *co,
+                               const char *s, unsigned int count)
+{
+       struct fiq_debugger_state *state;
+       unsigned long flags;
+
+       state = container_of(co, struct fiq_debugger_state, console);
+
+       if (!state->console_enable && !state->syslog_dumping)
+               return;
+
+       debug_uart_enable(state);
+       spin_lock_irqsave(&state->console_lock, flags);
+       while (count--) {
+               if (*s == '\n')
+                       debug_putc(state, '\r');
+               debug_putc(state, *s++);
+       }
+       debug_uart_flush(state);
+       spin_unlock_irqrestore(&state->console_lock, flags);
+       debug_uart_disable(state);
+}
+
+static struct console fiq_debugger_console = {
+       .name = "ttyFIQ",
+       .device = debug_console_device,
+       .write = debug_console_write,
+       .flags = CON_PRINTBUFFER | CON_ANYTIME | CON_ENABLED,
+};
+
+int fiq_tty_open(struct tty_struct *tty, struct file *filp)
+{
+       int line = tty->index;
+       struct fiq_debugger_state **states = tty->driver->driver_state;
+       struct fiq_debugger_state *state = states[line];
+
+       return tty_port_open(&state->tty_port, tty, filp);
+}
+
+void fiq_tty_close(struct tty_struct *tty, struct file *filp)
+{
+       tty_port_close(tty->port, tty, filp);
+}
+
+int fiq_tty_write(struct tty_struct *tty, const unsigned char *buf, int count)
+{
+       int i;
+       int line = tty->index;
+       struct fiq_debugger_state **states = tty->driver->driver_state;
+       struct fiq_debugger_state *state = states[line];
+
+       if (!state->console_enable)
+               return count;
+
+       debug_uart_enable(state);
+       spin_lock_irq(&state->console_lock);
+       for (i = 0; i < count; i++)
+               debug_putc(state, *buf++);
+       spin_unlock_irq(&state->console_lock);
+       debug_uart_disable(state);
+
+       return count;
+}
+
+int fiq_tty_write_room(struct tty_struct *tty)
+{
+       return 16;
+}
+
+#ifdef CONFIG_CONSOLE_POLL
+static int fiq_tty_poll_init(struct tty_driver *driver, int line, char *options)
+{
+       return 0;
+}
+
+static int fiq_tty_poll_get_char(struct tty_driver *driver, int line)
+{
+       struct fiq_debugger_state **states = driver->driver_state;
+       struct fiq_debugger_state *state = states[line];
+       int c = NO_POLL_CHAR;
+
+       debug_uart_enable(state);
+       if (debug_have_fiq(state)) {
+               int count = fiq_debugger_ringbuf_level(state->tty_rbuf);
+               if (count > 0) {
+                       c = fiq_debugger_ringbuf_peek(state->tty_rbuf, 0);
+                       fiq_debugger_ringbuf_consume(state->tty_rbuf, 1);
+               }
+       } else {
+               c = debug_getc(state);
+               if (c == FIQ_DEBUGGER_NO_CHAR)
+                       c = NO_POLL_CHAR;
+       }
+       debug_uart_disable(state);
+
+       return c;
+}
+
+static void fiq_tty_poll_put_char(struct tty_driver *driver, int line, char ch)
+{
+       struct fiq_debugger_state **states = driver->driver_state;
+       struct fiq_debugger_state *state = states[line];
+       debug_uart_enable(state);
+       debug_putc(state, ch);
+       debug_uart_disable(state);
+}
+#endif
+
+static const struct tty_port_operations fiq_tty_port_ops;
+
+static const struct tty_operations fiq_tty_driver_ops = {
+       .write = fiq_tty_write,
+       .write_room = fiq_tty_write_room,
+       .open = fiq_tty_open,
+       .close = fiq_tty_close,
+#ifdef CONFIG_CONSOLE_POLL
+       .poll_init = fiq_tty_poll_init,
+       .poll_get_char = fiq_tty_poll_get_char,
+       .poll_put_char = fiq_tty_poll_put_char,
+#endif
+};
+
+static int fiq_debugger_tty_init(void)
+{
+       int ret;
+       struct fiq_debugger_state **states = NULL;
+
+       states = kzalloc(sizeof(*states) * MAX_FIQ_DEBUGGER_PORTS, GFP_KERNEL);
+       if (!states) {
+               pr_err("Failed to allocate fiq debugger state structures\n");
+               return -ENOMEM;
+       }
+
+       fiq_tty_driver = alloc_tty_driver(MAX_FIQ_DEBUGGER_PORTS);
+       if (!fiq_tty_driver) {
+               pr_err("Failed to allocate fiq debugger tty\n");
+               ret = -ENOMEM;
+               goto err_free_state;
+       }
+
+       fiq_tty_driver->owner           = THIS_MODULE;
+       fiq_tty_driver->driver_name     = "fiq-debugger";
+       fiq_tty_driver->name            = "ttyFIQ";
+       fiq_tty_driver->type            = TTY_DRIVER_TYPE_SERIAL;
+       fiq_tty_driver->subtype         = SERIAL_TYPE_NORMAL;
+       fiq_tty_driver->init_termios    = tty_std_termios;
+       fiq_tty_driver->flags           = TTY_DRIVER_REAL_RAW |
+                                         TTY_DRIVER_DYNAMIC_DEV;
+       fiq_tty_driver->driver_state    = states;
+
+       fiq_tty_driver->init_termios.c_cflag =
+                                       B115200 | CS8 | CREAD | HUPCL | CLOCAL;
+       fiq_tty_driver->init_termios.c_ispeed = 115200;
+       fiq_tty_driver->init_termios.c_ospeed = 115200;
+
+       tty_set_operations(fiq_tty_driver, &fiq_tty_driver_ops);
+
+       ret = tty_register_driver(fiq_tty_driver);
+       if (ret) {
+               pr_err("Failed to register fiq tty: %d\n", ret);
+               goto err_free_tty;
+       }
+
+       pr_info("Registered FIQ tty driver\n");
+       return 0;
+
+err_free_tty:
+       put_tty_driver(fiq_tty_driver);
+       fiq_tty_driver = NULL;
+err_free_state:
+       kfree(states);
+       return ret;
+}
+
+static int fiq_debugger_tty_init_one(struct fiq_debugger_state *state)
+{
+       int ret;
+       struct device *tty_dev;
+       struct fiq_debugger_state **states = fiq_tty_driver->driver_state;
+
+       states[state->pdev->id] = state;
+
+       state->tty_rbuf = fiq_debugger_ringbuf_alloc(1024);
+       if (!state->tty_rbuf) {
+               pr_err("Failed to allocate fiq debugger ringbuf\n");
+               ret = -ENOMEM;
+               goto err;
+       }
+
+       tty_port_init(&state->tty_port);
+       state->tty_port.ops = &fiq_tty_port_ops;
+
+       tty_dev = tty_port_register_device(&state->tty_port, fiq_tty_driver,
+                                          state->pdev->id, &state->pdev->dev);
+       if (IS_ERR(tty_dev)) {
+               pr_err("Failed to register fiq debugger tty device\n");
+               ret = PTR_ERR(tty_dev);
+               goto err;
+       }
+
+       device_set_wakeup_capable(tty_dev, 1);
+
+       pr_info("Registered fiq debugger ttyFIQ%d\n", state->pdev->id);
+
+       return 0;
+
+err:
+       fiq_debugger_ringbuf_free(state->tty_rbuf);
+       state->tty_rbuf = NULL;
+       return ret;
+}
+#endif
+
+static int fiq_debugger_dev_suspend(struct device *dev)
+{
+       struct platform_device *pdev = to_platform_device(dev);
+       struct fiq_debugger_state *state = platform_get_drvdata(pdev);
+
+       if (state->pdata->uart_dev_suspend)
+               return state->pdata->uart_dev_suspend(pdev);
+       return 0;
+}
+
+static int fiq_debugger_dev_resume(struct device *dev)
+{
+       struct platform_device *pdev = to_platform_device(dev);
+       struct fiq_debugger_state *state = platform_get_drvdata(pdev);
+
+       if (state->pdata->uart_dev_resume)
+               return state->pdata->uart_dev_resume(pdev);
+       return 0;
+}
+
+static int fiq_debugger_probe(struct platform_device *pdev)
+{
+       int ret;
+       struct fiq_debugger_pdata *pdata = dev_get_platdata(&pdev->dev);
+       struct fiq_debugger_state *state;
+       int fiq;
+       int uart_irq;
+
+       if (pdev->id >= MAX_FIQ_DEBUGGER_PORTS)
+               return -EINVAL;
+
+       if (!pdata->uart_getc || !pdata->uart_putc)
+               return -EINVAL;
+       if ((pdata->uart_enable && !pdata->uart_disable) ||
+           (!pdata->uart_enable && pdata->uart_disable))
+               return -EINVAL;
+
+       fiq = platform_get_irq_byname(pdev, "fiq");
+       uart_irq = platform_get_irq_byname(pdev, "uart_irq");
+
+       /* uart_irq mode and fiq mode are mutually exclusive, but one of them
+        * is required */
+       if ((uart_irq < 0 && fiq < 0) || (uart_irq >= 0 && fiq >= 0))
+               return -EINVAL;
+       if (fiq >= 0 && !pdata->fiq_enable)
+               return -EINVAL;
+
+       state = kzalloc(sizeof(*state), GFP_KERNEL);
+       if (!state)
+               return -ENOMEM;
+       setup_timer(&state->sleep_timer, sleep_timer_expired,
+                   (unsigned long)state);
+       state->pdata = pdata;
+       state->pdev = pdev;
+       state->no_sleep = initial_no_sleep;
+       state->debug_enable = initial_debug_enable;
+       state->console_enable = initial_console_enable;
+
+       state->fiq = fiq;
+       state->uart_irq = uart_irq;
+       state->signal_irq = platform_get_irq_byname(pdev, "signal");
+       state->wakeup_irq = platform_get_irq_byname(pdev, "wakeup");
+
+       INIT_WORK(&state->work, debug_work);
+       spin_lock_init(&state->work_lock);
+
+       platform_set_drvdata(pdev, state);
+
+       spin_lock_init(&state->sleep_timer_lock);
+
+       if (state->wakeup_irq < 0 && debug_have_fiq(state))
+               state->no_sleep = true;
+       state->ignore_next_wakeup_irq = !state->no_sleep;
+
+       wake_lock_init(&state->debugger_wake_lock,
+                       WAKE_LOCK_SUSPEND, "serial-debug");
+
+       state->clk = clk_get(&pdev->dev, NULL);
+       if (IS_ERR(state->clk))
+               state->clk = NULL;
+
+       /* do not call pdata->uart_enable here since uart_init may still
+        * need to do some initialization before uart_enable can work.
+        * So, only try to manage the clock during init.
+        */
+       if (state->clk)
+               clk_enable(state->clk);
+
+       if (pdata->uart_init) {
+               ret = pdata->uart_init(pdev);
+               if (ret)
+                       goto err_uart_init;
+       }
+
+       debug_printf_nfiq(state, "<hit enter %sto activate fiq debugger>\n",
+                               state->no_sleep ? "" : "twice ");
+
+       if (debug_have_fiq(state)) {
+               state->handler.fiq = debug_fiq;
+               state->handler.resume = debug_resume;
+               ret = fiq_glue_register_handler(&state->handler);
+               if (ret) {
+                       pr_err("%s: could not install fiq handler\n", __func__);
+                       goto err_register_fiq;
+               }
+
+               pdata->fiq_enable(pdev, state->fiq, 1);
+       } else {
+               ret = request_irq(state->uart_irq, debug_uart_irq,
+                                 IRQF_NO_SUSPEND, "debug", state);
+               if (ret) {
+                       pr_err("%s: could not install irq handler\n", __func__);
+                       goto err_register_irq;
+               }
+
+               /* for irq-only mode, we want this irq to wake us up, if it
+                * can.
+                */
+               enable_irq_wake(state->uart_irq);
+       }
+
+       if (state->clk)
+               clk_disable(state->clk);
+
+       if (state->signal_irq >= 0) {
+               ret = request_irq(state->signal_irq, debug_signal_irq,
+                         IRQF_TRIGGER_RISING, "debug-signal", state);
+               if (ret)
+                       pr_err("serial_debugger: could not install signal_irq\n");
+       }
+
+       if (state->wakeup_irq >= 0) {
+               ret = request_irq(state->wakeup_irq, wakeup_irq_handler,
+                                 IRQF_TRIGGER_FALLING | IRQF_DISABLED,
+                                 "debug-wakeup", state);
+               if (ret) {
+                       pr_err("serial_debugger: "
+                               "could not install wakeup irq\n");
+                       state->wakeup_irq = -1;
+               } else {
+                       ret = enable_irq_wake(state->wakeup_irq);
+                       if (ret) {
+                               pr_err("serial_debugger: "
+                                       "could not enable wakeup\n");
+                               state->wakeup_irq_no_set_wake = true;
+                       }
+               }
+       }
+       if (state->no_sleep)
+               handle_wakeup(state);
+
+#if defined(CONFIG_FIQ_DEBUGGER_CONSOLE)
+       spin_lock_init(&state->console_lock);
+       state->console = fiq_debugger_console;
+       state->console.index = pdev->id;
+       if (!console_set_on_cmdline)
+               add_preferred_console(state->console.name,
+                       state->console.index, NULL);
+       register_console(&state->console);
+       fiq_debugger_tty_init_one(state);
+#endif
+       return 0;
+
+err_register_irq:
+err_register_fiq:
+       if (pdata->uart_free)
+               pdata->uart_free(pdev);
+err_uart_init:
+       if (state->clk) {
+               clk_disable(state->clk);
+               clk_put(state->clk);
+       }
+       wake_lock_destroy(&state->debugger_wake_lock);
+       platform_set_drvdata(pdev, NULL);
+       kfree(state);
+       return ret;
+}
+
+static const struct dev_pm_ops fiq_debugger_dev_pm_ops = {
+       .suspend        = fiq_debugger_dev_suspend,
+       .resume         = fiq_debugger_dev_resume,
+};
+
+static struct platform_driver fiq_debugger_driver = {
+       .probe  = fiq_debugger_probe,
+       .driver = {
+               .name   = "fiq_debugger",
+               .pm     = &fiq_debugger_dev_pm_ops,
+       },
+};
+
+static int __init fiq_debugger_init(void)
+{
+#if defined(CONFIG_FIQ_DEBUGGER_CONSOLE)
+       fiq_debugger_tty_init();
+#endif
+       return platform_driver_register(&fiq_debugger_driver);
+}
+
+postcore_initcall(fiq_debugger_init);
diff --git a/arch/arm/common/fiq_debugger_ringbuf.h b/arch/arm/common/fiq_debugger_ringbuf.h
new file mode 100644 (file)
index 0000000..2649b55
--- /dev/null
@@ -0,0 +1,94 @@
+/*
+ * arch/arm/common/fiq_debugger_ringbuf.h
+ *
+ * simple lockless ringbuffer
+ *
+ * Copyright (C) 2010 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/slab.h>
+
+struct fiq_debugger_ringbuf {
+       int len;
+       int head;
+       int tail;
+       u8 buf[];
+};
+
+
+static inline struct fiq_debugger_ringbuf *fiq_debugger_ringbuf_alloc(int len)
+{
+       struct fiq_debugger_ringbuf *rbuf;
+
+       rbuf = kzalloc(sizeof(*rbuf) + len, GFP_KERNEL);
+       if (rbuf == NULL)
+               return NULL;
+
+       rbuf->len = len;
+       rbuf->head = 0;
+       rbuf->tail = 0;
+       smp_mb();
+
+       return rbuf;
+}
+
+static inline void fiq_debugger_ringbuf_free(struct fiq_debugger_ringbuf *rbuf)
+{
+       kfree(rbuf);
+}
+
+static inline int fiq_debugger_ringbuf_level(struct fiq_debugger_ringbuf *rbuf)
+{
+       int level = rbuf->head - rbuf->tail;
+
+       if (level < 0)
+               level = rbuf->len + level;
+
+       return level;
+}
+
+static inline int fiq_debugger_ringbuf_room(struct fiq_debugger_ringbuf *rbuf)
+{
+       return rbuf->len - fiq_debugger_ringbuf_level(rbuf) - 1;
+}
+
+static inline u8
+fiq_debugger_ringbuf_peek(struct fiq_debugger_ringbuf *rbuf, int i)
+{
+       return rbuf->buf[(rbuf->tail + i) % rbuf->len];
+}
+
+static inline int
+fiq_debugger_ringbuf_consume(struct fiq_debugger_ringbuf *rbuf, int count)
+{
+       count = min(count, fiq_debugger_ringbuf_level(rbuf));
+
+       rbuf->tail = (rbuf->tail + count) % rbuf->len;
+       smp_mb();
+
+       return count;
+}
+
+static inline int
+fiq_debugger_ringbuf_push(struct fiq_debugger_ringbuf *rbuf, u8 datum)
+{
+       if (fiq_debugger_ringbuf_room(rbuf) == 0)
+               return 0;
+
+       rbuf->buf[rbuf->head] = datum;
+       smp_mb();
+       rbuf->head = (rbuf->head + 1) % rbuf->len;
+       smp_mb();
+
+       return 1;
+}
diff --git a/arch/arm/common/fiq_glue.S b/arch/arm/common/fiq_glue.S
new file mode 100644 (file)
index 0000000..9e3455a
--- /dev/null
@@ -0,0 +1,111 @@
+/*
+ * Copyright (C) 2008 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/linkage.h>
+#include <asm/assembler.h>
+
+               .text
+
+               .global fiq_glue_end
+
+               /* fiq stack: r0-r15,cpsr,spsr of interrupted mode */
+
+ENTRY(fiq_glue)
+               /* store pc, cpsr from previous mode */
+               mrs     r12, spsr
+               sub     r11, lr, #4
+               subs    r10, #1
+               bne     nested_fiq
+
+               stmfd   sp!, {r11-r12, lr}
+
+               /* store r8-r14 from previous mode */
+               sub     sp, sp, #(7 * 4)
+               stmia   sp, {r8-r14}^
+               nop
+
+               /* store r0-r7 from previous mode */
+               stmfd   sp!, {r0-r7}
+
+               /* setup func(data,regs) arguments */
+               mov     r0, r9
+               mov     r1, sp
+               mov     r3, r8
+
+               mov     r7, sp
+
+               /* Get sp and lr from non-user modes */
+               and     r4, r12, #MODE_MASK
+               cmp     r4, #USR_MODE
+               beq     fiq_from_usr_mode
+
+               mov     r7, sp
+               orr     r4, r4, #(PSR_I_BIT | PSR_F_BIT)
+               msr     cpsr_c, r4
+               str     sp, [r7, #(4 * 13)]
+               str     lr, [r7, #(4 * 14)]
+               mrs     r5, spsr
+               str     r5, [r7, #(4 * 17)]
+
+               cmp     r4, #(SVC_MODE | PSR_I_BIT | PSR_F_BIT)
+               /* use fiq stack if we reenter this mode */
+               subne   sp, r7, #(4 * 3)
+
+fiq_from_usr_mode:
+               msr     cpsr_c, #(SVC_MODE | PSR_I_BIT | PSR_F_BIT)
+               mov     r2, sp
+               sub     sp, r7, #12
+               stmfd   sp!, {r2, ip, lr}
+               /* call func(data,regs) */
+               blx     r3
+               ldmfd   sp, {r2, ip, lr}
+               mov     sp, r2
+
+               /* restore/discard saved state */
+               cmp     r4, #USR_MODE
+               beq     fiq_from_usr_mode_exit
+
+               msr     cpsr_c, r4
+               ldr     sp, [r7, #(4 * 13)]
+               ldr     lr, [r7, #(4 * 14)]
+               msr     spsr_cxsf, r5
+
+fiq_from_usr_mode_exit:
+               msr     cpsr_c, #(FIQ_MODE | PSR_I_BIT | PSR_F_BIT)
+
+               ldmfd   sp!, {r0-r7}
+               add     sp, sp, #(7 * 4)
+               ldmfd   sp!, {r11-r12, lr}
+exit_fiq:
+               msr     spsr_cxsf, r12
+               add     r10, #1
+               movs    pc, r11
+
+nested_fiq:
+               orr     r12, r12, #(PSR_F_BIT)
+               b       exit_fiq
+
+fiq_glue_end:
+
+ENTRY(fiq_glue_setup) /* func, data, sp */
+               mrs             r3, cpsr
+               msr             cpsr_c, #(FIQ_MODE | PSR_I_BIT | PSR_F_BIT)
+               movs            r8, r0
+               mov             r9, r1
+               mov             sp, r2
+               moveq           r10, #0
+               movne           r10, #1
+               msr             cpsr_c, r3
+               bx              lr
diff --git a/arch/arm/common/fiq_glue_setup.c b/arch/arm/common/fiq_glue_setup.c
new file mode 100644 (file)
index 0000000..4044c7d
--- /dev/null
@@ -0,0 +1,100 @@
+/*
+ * Copyright (C) 2010 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/percpu.h>
+#include <linux/slab.h>
+#include <asm/fiq.h>
+#include <asm/fiq_glue.h>
+
+extern unsigned char fiq_glue, fiq_glue_end;
+extern void fiq_glue_setup(void *func, void *data, void *sp);
+
+static struct fiq_handler fiq_debugger_fiq_handler = {
+       .name = "fiq_glue",
+};
+DEFINE_PER_CPU(void *, fiq_stack);
+static struct fiq_glue_handler *current_handler;
+static DEFINE_MUTEX(fiq_glue_lock);
+
+static void fiq_glue_setup_helper(void *info)
+{
+       struct fiq_glue_handler *handler = info;
+       fiq_glue_setup(handler->fiq, handler,
+               __get_cpu_var(fiq_stack) + THREAD_START_SP);
+}
+
+int fiq_glue_register_handler(struct fiq_glue_handler *handler)
+{
+       int ret;
+       int cpu;
+
+       if (!handler || !handler->fiq)
+               return -EINVAL;
+
+       mutex_lock(&fiq_glue_lock);
+       if (current_handler) {
+               ret = -EBUSY;
+               goto err_busy;
+       }
+
+       for_each_possible_cpu(cpu) {
+               void *stack;
+               stack = (void *)__get_free_pages(GFP_KERNEL, THREAD_SIZE_ORDER);
+               if (WARN_ON(!stack)) {
+                       ret = -ENOMEM;
+                       goto err_alloc_fiq_stack;
+               }
+               per_cpu(fiq_stack, cpu) = stack;
+       }
+
+       ret = claim_fiq(&fiq_debugger_fiq_handler);
+       if (WARN_ON(ret))
+               goto err_claim_fiq;
+
+       current_handler = handler;
+       on_each_cpu(fiq_glue_setup_helper, handler, true);
+       set_fiq_handler(&fiq_glue, &fiq_glue_end - &fiq_glue);
+
+       mutex_unlock(&fiq_glue_lock);
+       return 0;
+
+err_claim_fiq:
+err_alloc_fiq_stack:
+       for_each_possible_cpu(cpu) {
+               free_pages((unsigned long)per_cpu(fiq_stack, cpu),
+                          THREAD_SIZE_ORDER);
+               per_cpu(fiq_stack, cpu) = NULL;
+       }
+err_busy:
+       mutex_unlock(&fiq_glue_lock);
+       return ret;
+}
+
+/**
+ * fiq_glue_resume - Restore fiqs after suspend or low power idle states
+ *
+ * This must be called before local_fiq_enable() after returning from a
+ * power state where the fiq mode registers were lost. If a driver provided
+ * a resume hook when it registered the handler, it will be called.
+ */
+void fiq_glue_resume(void)
+{
+       if (!current_handler)
+               return;
+       fiq_glue_setup(current_handler->fiq, current_handler,
+               __get_cpu_var(fiq_stack) + THREAD_START_SP);
+       if (current_handler->resume)
+               current_handler->resume(current_handler);
+}
diff --git a/arch/arm/configs/android_omap_defconfig b/arch/arm/configs/android_omap_defconfig
new file mode 100644 (file)
index 0000000..0cf8ea2
--- /dev/null
@@ -0,0 +1,286 @@
+CONFIG_EXPERIMENTAL=y
+CONFIG_SYSVIPC=y
+CONFIG_POSIX_MQUEUE=y
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_BSD_PROCESS_ACCT=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_LOG_BUF_SHIFT=16
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_EXPERT=y
+CONFIG_SLAB=y
+CONFIG_PROFILING=y
+CONFIG_OPROFILE=y
+CONFIG_KPROBES=y
+CONFIG_MODULES=y
+CONFIG_MODULE_FORCE_LOAD=y
+CONFIG_MODULE_UNLOAD=y
+CONFIG_MODULE_FORCE_UNLOAD=y
+CONFIG_MODVERSIONS=y
+CONFIG_MODULE_SRCVERSION_ALL=y
+# CONFIG_BLK_DEV_BSG is not set
+CONFIG_PARTITION_ADVANCED=y
+CONFIG_ARCH_OMAP=y
+CONFIG_OMAP_RESET_CLOCKS=y
+CONFIG_OMAP_MUX_DEBUG=y
+CONFIG_SOC_OMAP5=y
+CONFIG_ARM_THUMBEE=y
+CONFIG_ARM_ERRATA_411920=y
+CONFIG_SMP=y
+CONFIG_NR_CPUS=2
+CONFIG_ZBOOT_ROM_TEXT=0x0
+CONFIG_ZBOOT_ROM_BSS=0x0
+CONFIG_CMDLINE="root=/dev/mmcblk0p2 rootwait console=ttyO2,115200 androidboot.console=ttyO2"
+CONFIG_KEXEC=y
+CONFIG_CPU_FREQ=y
+CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND=y
+CONFIG_CPU_FREQ_GOV_USERSPACE=y
+CONFIG_GENERIC_CPUFREQ_CPU0=y
+# CONFIG_ARM_OMAP2PLUS_CPUFREQ is not set
+CONFIG_FPE_NWFPE=y
+CONFIG_BINFMT_MISC=y
+CONFIG_PM_DEBUG=y
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_XFRM_USER=y
+CONFIG_NET_KEY=y
+CONFIG_NET_KEY_MIGRATE=y
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+CONFIG_IP_PNP_BOOTP=y
+CONFIG_IP_PNP_RARP=y
+# CONFIG_INET_LRO is not set
+# CONFIG_IPV6 is not set
+CONFIG_NETFILTER=y
+CONFIG_BT=m
+CONFIG_BT_HCIUART=m
+CONFIG_BT_HCIUART_H4=y
+CONFIG_BT_HCIUART_BCSP=y
+CONFIG_BT_HCIUART_LL=y
+CONFIG_BT_HCIBCM203X=m
+CONFIG_BT_HCIBPA10X=m
+CONFIG_CFG80211=m
+CONFIG_MAC80211=m
+CONFIG_MAC80211_RC_PID=y
+CONFIG_MAC80211_RC_DEFAULT_PID=y
+CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+CONFIG_DEVTMPFS=y
+CONFIG_DEVTMPFS_MOUNT=y
+CONFIG_OMAP_OCP2SCP=y
+CONFIG_CONNECTOR=y
+CONFIG_MTD=y
+CONFIG_MTD_CMDLINE_PARTS=y
+CONFIG_MTD_CHAR=y
+CONFIG_MTD_BLOCK=y
+CONFIG_MTD_OOPS=y
+CONFIG_MTD_CFI=y
+CONFIG_MTD_CFI_INTELEXT=y
+CONFIG_MTD_M25P80=y
+CONFIG_MTD_NAND=y
+CONFIG_MTD_NAND_OMAP2=y
+CONFIG_MTD_ONENAND=y
+CONFIG_MTD_ONENAND_VERIFY_WRITE=y
+CONFIG_MTD_ONENAND_OMAP2=y
+CONFIG_MTD_UBI=y
+CONFIG_BLK_DEV_LOOP=y
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_RAM_SIZE=16384
+CONFIG_BLK_DEV_SD=y
+CONFIG_SCSI_MULTI_LUN=y
+CONFIG_SCSI_SCAN_ASYNC=y
+CONFIG_ATA=y
+# CONFIG_SATA_PMP is not set
+CONFIG_SATA_AHCI_PLATFORM=m
+# CONFIG_ATA_SFF is not set
+CONFIG_MD=y
+CONFIG_NETDEVICES=y
+CONFIG_KS8851=y
+CONFIG_KS8851_MLL=y
+CONFIG_SMC91X=y
+CONFIG_SMSC911X=y
+CONFIG_TI_CPSW=y
+CONFIG_SMSC_PHY=y
+CONFIG_USB_USBNET=y
+CONFIG_USB_NET_SMSC95XX=y
+CONFIG_USB_ALI_M5632=y
+CONFIG_USB_AN2720=y
+CONFIG_USB_EPSON2888=y
+CONFIG_USB_KC2190=y
+CONFIG_LIBERTAS=m
+CONFIG_LIBERTAS_USB=m
+CONFIG_LIBERTAS_SDIO=m
+CONFIG_LIBERTAS_DEBUG=y
+CONFIG_INPUT_JOYDEV=y
+CONFIG_INPUT_EVDEV=y
+CONFIG_KEYBOARD_GPIO=y
+CONFIG_KEYBOARD_TWL4030=y
+CONFIG_INPUT_TOUCHSCREEN=y
+CONFIG_TOUCHSCREEN_ADS7846=y
+CONFIG_INPUT_MISC=y
+CONFIG_INPUT_TWL4030_PWRBUTTON=y
+CONFIG_VT_HW_CONSOLE_BINDING=y
+# CONFIG_LEGACY_PTYS is not set
+CONFIG_SERIAL_8250_NR_UARTS=32
+CONFIG_SERIAL_8250_EXTENDED=y
+CONFIG_SERIAL_8250_MANY_PORTS=y
+CONFIG_SERIAL_8250_SHARE_IRQ=y
+CONFIG_SERIAL_8250_DETECT_IRQ=y
+CONFIG_SERIAL_8250_RSA=y
+CONFIG_HW_RANDOM=y
+CONFIG_I2C_CHARDEV=y
+CONFIG_SPI=y
+CONFIG_SPI_OMAP24XX=y
+CONFIG_DEBUG_PINCTRL=y
+CONFIG_DEBUG_GPIO=y
+CONFIG_GPIO_SYSFS=y
+CONFIG_GPIO_TWL4030=y
+CONFIG_W1=y
+CONFIG_POWER_SUPPLY=y
+CONFIG_WATCHDOG=y
+CONFIG_PALMAS_WATCHDOG=y
+CONFIG_OMAP_WATCHDOG=y
+CONFIG_TWL4030_WATCHDOG=y
+CONFIG_MFD_TPS65217=y
+CONFIG_MFD_TPS65910=y
+CONFIG_MFD_PALMAS_GPADC=y
+CONFIG_MFD_PALMAS_PWM=y
+CONFIG_MFD_PALMAS_RESOURCE=y
+CONFIG_REGULATOR_TPS65023=y
+CONFIG_REGULATOR_TPS6507X=y
+CONFIG_REGULATOR_TPS65217=y
+CONFIG_REGULATOR_TPS65910=y
+CONFIG_REGULATOR_TWL4030=y
+CONFIG_MEDIA_SUPPORT=m
+CONFIG_MEDIA_CAMERA_SUPPORT=y
+CONFIG_MEDIA_USB_SUPPORT=y
+CONFIG_USB_VIDEO_CLASS=m
+CONFIG_DRM=y
+CONFIG_FIRMWARE_EDID=y
+CONFIG_FB_MODE_HELPERS=y
+CONFIG_FB_TILEBLITTING=y
+CONFIG_OMAP2_DSS=y
+CONFIG_OMAP2_DSS_RFBI=y
+CONFIG_OMAP2_DSS_SDI=y
+CONFIG_OMAP2_DSS_DSI=y
+CONFIG_PANEL_GENERIC_DPI=m
+CONFIG_PANEL_SHARP_LS037V7DW01=m
+CONFIG_PANEL_NEC_NL8048HL11_01B=m
+CONFIG_PANEL_TAAL=m
+CONFIG_PANEL_TPO_TD043MTEA1=m
+CONFIG_PANEL_ACX565AKM=m
+CONFIG_BACKLIGHT_LCD_SUPPORT=y
+CONFIG_LCD_CLASS_DEVICE=y
+CONFIG_LCD_PLATFORM=y
+CONFIG_FRAMEBUFFER_CONSOLE=y
+CONFIG_FRAMEBUFFER_CONSOLE_ROTATION=y
+CONFIG_FONTS=y
+CONFIG_FONT_8x8=y
+CONFIG_FONT_8x16=y
+CONFIG_LOGO=y
+CONFIG_SOUND=m
+CONFIG_SND=m
+CONFIG_SND_MIXER_OSS=m
+CONFIG_SND_PCM_OSS=m
+CONFIG_SND_VERBOSE_PRINTK=y
+CONFIG_SND_DEBUG=y
+CONFIG_SND_USB_AUDIO=m
+CONFIG_SND_SOC=m
+CONFIG_SND_OMAP_SOC=m
+CONFIG_SND_OMAP_SOC_OMAP_TWL4030=m
+CONFIG_SND_OMAP_SOC_OMAP3_PANDORA=m
+CONFIG_USB=y
+CONFIG_USB_DEBUG=y
+CONFIG_USB_ANNOUNCE_NEW_DEVICES=y
+CONFIG_USB_SUSPEND=y
+CONFIG_USB_DWC3=m
+CONFIG_USB_DWC3_DEBUG=y
+CONFIG_USB_DWC3_VERBOSE=y
+CONFIG_USB_MON=y
+CONFIG_USB_EHCI_HCD=y
+CONFIG_USB_MUSB_HDRC=m
+CONFIG_USB_MUSB_OMAP2PLUS=m
+CONFIG_USB_MUSB_AM35X=m
+CONFIG_USB_MUSB_DSPS=m
+CONFIG_MUSB_PIO_ONLY=y
+CONFIG_USB_WDM=y
+CONFIG_USB_STORAGE=y
+CONFIG_USB_TEST=y
+CONFIG_OMAP_USB2=m
+CONFIG_OMAP_USB3=m
+CONFIG_OMAP_CONTROL_USB=y
+CONFIG_USB_GADGET=y
+CONFIG_USB_GADGET_DEBUG=y
+CONFIG_USB_GADGET_DEBUG_FILES=y
+CONFIG_USB_GADGET_DEBUG_FS=y
+CONFIG_USB_ZERO=m
+CONFIG_USB_ETH=m
+CONFIG_USB_ETH_EEM=y
+CONFIG_USB_MASS_STORAGE=m
+CONFIG_USB_G_MULTI=m
+CONFIG_USB_G_MULTI_CDC=y
+CONFIG_PALMAS_USB=m
+CONFIG_MMC=y
+CONFIG_MMC_UNSAFE_RESUME=y
+CONFIG_SDIO_UART=y
+CONFIG_MMC_OMAP=y
+CONFIG_MMC_OMAP_HS=y
+CONFIG_RTC_CLASS=y
+CONFIG_RTC_DRV_TWL92330=y
+CONFIG_RTC_DRV_TWL4030=y
+CONFIG_RTC_DRV_PALMAS=y
+CONFIG_RTC_DRV_OMAP=y
+CONFIG_DMADEVICES=y
+CONFIG_TI_EDMA=y
+CONFIG_DMA_OMAP=y
+CONFIG_STAGING=y
+CONFIG_DRM_OMAP=y
+CONFIG_DRM_OMAP_NUM_CRTCS=3
+CONFIG_ANDROID=y
+CONFIG_ANDROID_BINDER_IPC=y
+CONFIG_ASHMEM=y
+CONFIG_ANDROID_LOGGER=y
+CONFIG_ANDROID_TIMED_GPIO=y
+CONFIG_ANDROID_LOW_MEMORY_KILLER=y
+CONFIG_ANDROID_INTF_ALARM_DEV=y
+CONFIG_EXT2_FS=y
+CONFIG_EXT3_FS=y
+# CONFIG_EXT3_FS_XATTR is not set
+CONFIG_EXT4_FS=y
+CONFIG_QUOTA=y
+CONFIG_QFMT_V2=y
+CONFIG_MSDOS_FS=y
+CONFIG_VFAT_FS=y
+CONFIG_TMPFS=y
+CONFIG_JFFS2_FS=y
+CONFIG_JFFS2_SUMMARY=y
+CONFIG_JFFS2_FS_XATTR=y
+CONFIG_JFFS2_COMPRESSION_OPTIONS=y
+CONFIG_JFFS2_LZO=y
+CONFIG_JFFS2_RUBIN=y
+CONFIG_UBIFS_FS=y
+CONFIG_CRAMFS=y
+CONFIG_NFS_FS=y
+CONFIG_NFS_V3_ACL=y
+CONFIG_NFS_V4=y
+CONFIG_ROOT_NFS=y
+CONFIG_NLS_CODEPAGE_437=y
+CONFIG_NLS_ISO8859_1=y
+CONFIG_PRINTK_TIME=y
+CONFIG_MAGIC_SYSRQ=y
+CONFIG_SCHEDSTATS=y
+CONFIG_TIMER_STATS=y
+CONFIG_PROVE_LOCKING=y
+# CONFIG_DEBUG_BUGVERBOSE is not set
+CONFIG_DEBUG_INFO=y
+CONFIG_SECURITY=y
+CONFIG_CRYPTO_MICHAEL_MIC=y
+# CONFIG_CRYPTO_ANSI_CPRNG is not set
+CONFIG_CRC_CCITT=y
+CONFIG_CRC_T10DIF=y
+CONFIG_CRC_ITU_T=y
+CONFIG_CRC7=y
+CONFIG_LIBCRC32C=y
index e1489c54cd12b4dcb8b997e8e327e2875b183b0c..4e8217b204af73af8d9579ed07bd0fec97e9ae56 100644 (file)
@@ -16,6 +16,7 @@
 #include <asm/shmparam.h>
 #include <asm/cachetype.h>
 #include <asm/outercache.h>
+#include <asm/rodata.h>
 
 #define CACHE_COLOUR(vaddr)    ((vaddr & (SHMLBA - 1)) >> PAGE_SHIFT)
 
diff --git a/arch/arm/include/asm/fiq_debugger.h b/arch/arm/include/asm/fiq_debugger.h
new file mode 100644 (file)
index 0000000..4d27488
--- /dev/null
@@ -0,0 +1,64 @@
+/*
+ * arch/arm/include/asm/fiq_debugger.h
+ *
+ * Copyright (C) 2010 Google, Inc.
+ * Author: Colin Cross <ccross@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _ARCH_ARM_ASM_FIQ_DEBUGGER_H_
+#define _ARCH_ARM_ASM_FIQ_DEBUGGER_H_
+
+#include <linux/serial_core.h>
+
+#define FIQ_DEBUGGER_NO_CHAR NO_POLL_CHAR
+#define FIQ_DEBUGGER_BREAK 0x00ff0100
+
+#define FIQ_DEBUGGER_FIQ_IRQ_NAME      "fiq"
+#define FIQ_DEBUGGER_SIGNAL_IRQ_NAME   "signal"
+#define FIQ_DEBUGGER_WAKEUP_IRQ_NAME   "wakeup"
+
+/**
+ * struct fiq_debugger_pdata - fiq debugger platform data
+ * @uart_resume:       Used to restore uart state right before enabling
+ *                     the fiq.
+ * @uart_enable:       Do the work necessary to communicate with the uart
+ *                     hw (enable clocks, etc.). This must be ref-counted.
+ * @uart_disable:      Do the work necessary to disable the uart hw
+ *                     (disable clocks, etc.). This must be ref-counted.
+ * @uart_dev_suspend:  called during PM suspend, generally not needed
+ *                     for real fiq mode debugger.
+ * @uart_dev_resume:   called during PM resume, generally not needed
+ *                     for real fiq mode debugger.
+ */
+struct fiq_debugger_pdata {
+       int (*uart_init)(struct platform_device *pdev);
+       void (*uart_free)(struct platform_device *pdev);
+       int (*uart_resume)(struct platform_device *pdev);
+       int (*uart_getc)(struct platform_device *pdev);
+       void (*uart_putc)(struct platform_device *pdev, unsigned int c);
+       void (*uart_flush)(struct platform_device *pdev);
+       void (*uart_enable)(struct platform_device *pdev);
+       void (*uart_disable)(struct platform_device *pdev);
+
+       int (*uart_dev_suspend)(struct platform_device *pdev);
+       int (*uart_dev_resume)(struct platform_device *pdev);
+
+       void (*fiq_enable)(struct platform_device *pdev, unsigned int fiq,
+                                                               bool enable);
+       void (*fiq_ack)(struct platform_device *pdev, unsigned int fiq);
+
+       void (*force_irq)(struct platform_device *pdev, unsigned int irq);
+       void (*force_irq_ack)(struct platform_device *pdev, unsigned int irq);
+};
+
+#endif
diff --git a/arch/arm/include/asm/fiq_glue.h b/arch/arm/include/asm/fiq_glue.h
new file mode 100644 (file)
index 0000000..d54c29d
--- /dev/null
@@ -0,0 +1,30 @@
+/*
+ * Copyright (C) 2010 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __ASM_FIQ_GLUE_H
+#define __ASM_FIQ_GLUE_H
+
+struct fiq_glue_handler {
+       void (*fiq)(struct fiq_glue_handler *h, void *regs, void *svc_sp);
+       void (*resume)(struct fiq_glue_handler *h);
+};
+
+int fiq_glue_register_handler(struct fiq_glue_handler *handler);
+
+#ifdef CONFIG_FIQ_GLUE
+void fiq_glue_resume(void);
+#else
+static inline void fiq_glue_resume(void) {}
+#endif
+
+#endif
index 2740c2a2df639361617f6fe484ead14f8625eaf2..3d7351c844aac0ae2392d441796ce9904dcaf717 100644 (file)
@@ -5,7 +5,7 @@
 #include <linux/threads.h>
 #include <asm/irq.h>
 
-#define NR_IPI 6
+#define NR_IPI 7
 
 typedef struct {
        unsigned int __softirq_pending;
index 7ecd793b8f5a94ebdbba19ee26ac9a75eaaa33d6..dcf74d715f2b253bd12cea21ed267b2ef87c5ad2 100644 (file)
 #define TRACER_ACCESSED_BIT    0
 #define TRACER_RUNNING_BIT     1
 #define TRACER_CYCLE_ACC_BIT   2
+#define TRACER_TRACE_DATA_BIT  3
+#define TRACER_TIMESTAMP_BIT   4
+#define TRACER_BRANCHOUTPUT_BIT        5
+#define TRACER_RETURN_STACK_BIT        6
 #define TRACER_ACCESSED                BIT(TRACER_ACCESSED_BIT)
 #define TRACER_RUNNING         BIT(TRACER_RUNNING_BIT)
 #define TRACER_CYCLE_ACC       BIT(TRACER_CYCLE_ACC_BIT)
+#define TRACER_TRACE_DATA      BIT(TRACER_TRACE_DATA_BIT)
+#define TRACER_TIMESTAMP       BIT(TRACER_TIMESTAMP_BIT)
+#define TRACER_BRANCHOUTPUT    BIT(TRACER_BRANCHOUTPUT_BIT)
+#define TRACER_RETURN_STACK    BIT(TRACER_RETURN_STACK_BIT)
 
 #define TRACER_TIMEOUT 10000
 
-#define etm_writel(t, v, x) \
-       (__raw_writel((v), (t)->etm_regs + (x)))
-#define etm_readl(t, x) (__raw_readl((t)->etm_regs + (x)))
+#define etm_writel(t, id, v, x) \
+       (__raw_writel((v), (t)->etm_regs[(id)] + (x)))
+#define etm_readl(t, id, x) (__raw_readl((t)->etm_regs[(id)] + (x)))
 
 /* CoreSight Management Registers */
 #define CSMR_LOCKACCESS 0xfb0
@@ -43,7 +51,7 @@
 #define ETMCTRL_POWERDOWN      1
 #define ETMCTRL_PROGRAM                (1 << 10)
 #define ETMCTRL_PORTSEL                (1 << 11)
-#define ETMCTRL_DO_CONTEXTID   (3 << 14)
+#define ETMCTRL_CONTEXTIDSIZE(x) (((x) & 3) << 14)
 #define ETMCTRL_PORTMASK1      (7 << 4)
 #define ETMCTRL_PORTMASK2      (1 << 21)
 #define ETMCTRL_PORTMASK       (ETMCTRL_PORTMASK1 | ETMCTRL_PORTMASK2)
 #define ETMCTRL_DATA_DO_BOTH   (ETMCTRL_DATA_DO_DATA | ETMCTRL_DATA_DO_ADDR)
 #define ETMCTRL_BRANCH_OUTPUT  (1 << 8)
 #define ETMCTRL_CYCLEACCURATE  (1 << 12)
+#define ETMCTRL_TIMESTAMP_EN   (1 << 28)
+#define ETMCTRL_RETURN_STACK_EN        (1 << 29)
 
 /* ETM configuration code register */
 #define ETMR_CONFCODE          (0x04)
+#define ETMCCR_ETMIDR_PRESENT  BIT(31)
 
 /* ETM trace start/stop resource control register */
 #define ETMR_TRACESSCTRL       (0x18)
 #define ETMR_TRACEENCTRL       0x24
 #define ETMTE_INCLEXCL         BIT(24)
 #define ETMR_TRACEENEVT                0x20
-#define ETMCTRL_OPTS           (ETMCTRL_DO_CPRT | \
-                               ETMCTRL_DATA_DO_ADDR | \
-                               ETMCTRL_BRANCH_OUTPUT | \
-                               ETMCTRL_DO_CONTEXTID)
+
+#define ETMR_VIEWDATAEVT       0x30
+#define ETMR_VIEWDATACTRL1     0x34
+#define ETMR_VIEWDATACTRL2     0x38
+#define ETMR_VIEWDATACTRL3     0x3c
+#define ETMVDC3_EXCLONLY       BIT(16)
+
+#define ETMCTRL_OPTS           (ETMCTRL_DO_CPRT)
+
+#define ETMR_ID                        0x1e4
+#define ETMIDR_VERSION(x)      (((x) >> 4) & 0xff)
+#define ETMIDR_VERSION_3_1     0x21
+#define ETMIDR_VERSION_PFT_1_0 0x30
+
+#define ETMR_CCE               0x1e8
+#define ETMCCER_RETURN_STACK_IMPLEMENTED       BIT(23)
+#define ETMCCER_TIMESTAMPING_IMPLEMENTED       BIT(22)
+
+#define ETMR_TRACEIDR          0x200
 
 /* ETM management registers, "ETM Architecture", 3.5.24 */
 #define ETMMR_OSLAR    0x300
 #define ETBFF_TRIGIN           BIT(8)
 #define ETBFF_TRIGEVT          BIT(9)
 #define ETBFF_TRIGFL           BIT(10)
+#define ETBFF_STOPFL           BIT(12)
 
 #define etb_writel(t, v, x) \
        (__raw_writel((v), (t)->etb_regs + (x)))
 #define etb_readl(t, x) (__raw_readl((t)->etb_regs + (x)))
 
-#define etm_lock(t) do { etm_writel((t), 0, CSMR_LOCKACCESS); } while (0)
-#define etm_unlock(t) \
-       do { etm_writel((t), UNLOCK_MAGIC, CSMR_LOCKACCESS); } while (0)
+#define etm_lock(t, id) \
+       do { etm_writel((t), (id), 0, CSMR_LOCKACCESS); } while (0)
+#define etm_unlock(t, id) \
+       do { etm_writel((t), (id), UNLOCK_MAGIC, CSMR_LOCKACCESS); } while (0)
 
 #define etb_lock(t) do { etb_writel((t), 0, CSMR_LOCKACCESS); } while (0)
 #define etb_unlock(t) \
index 35c21c375d81c19121495c66114ead04044f0fa3..3e0857a6248e2ea53e200c497f2c7032798cc9d6 100644 (file)
@@ -30,6 +30,9 @@ extern void asm_do_IRQ(unsigned int, struct pt_regs *);
 void handle_IRQ(unsigned int, struct pt_regs *);
 void init_IRQ(void);
 
+void arch_trigger_all_cpu_backtrace(void);
+#define arch_trigger_all_cpu_backtrace arch_trigger_all_cpu_backtrace
+
 #endif
 
 #endif
diff --git a/arch/arm/include/asm/mach/mmc.h b/arch/arm/include/asm/mach/mmc.h
new file mode 100644 (file)
index 0000000..bca864a
--- /dev/null
@@ -0,0 +1,28 @@
+/*
+ *  arch/arm/include/asm/mach/mmc.h
+ */
+#ifndef ASMARM_MACH_MMC_H
+#define ASMARM_MACH_MMC_H
+
+#include <linux/mmc/host.h>
+#include <linux/mmc/card.h>
+#include <linux/mmc/sdio_func.h>
+
+struct embedded_sdio_data {
+       struct sdio_cis cis;
+       struct sdio_cccr cccr;
+       struct sdio_embedded_func *funcs;
+       int num_funcs;
+};
+
+struct mmc_platform_data {
+       unsigned int ocr_mask;                  /* available voltages */
+       int built_in;                           /* built-in device flag */
+       int card_present;                       /* card detect state */
+       u32 (*translate_vdd)(struct device *, unsigned int);
+       unsigned int (*status)(struct device *);
+       struct embedded_sdio_data *embedded_sdio;
+       int (*register_status_notify)(void (*callback)(int card_present,
+                                     void *dev_id), void *dev_id);
+};
+
+#endif
diff --git a/arch/arm/include/asm/rodata.h b/arch/arm/include/asm/rodata.h
new file mode 100644 (file)
index 0000000..8c8add8
--- /dev/null
@@ -0,0 +1,32 @@
+/*
+ *  arch/arm/include/asm/rodata.h
+ *
+ *  Copyright (C) 2011 Google, Inc.
+ *
+ *  Author: Colin Cross <ccross@android.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef _ASMARM_RODATA_H
+#define _ASMARM_RODATA_H
+
+#ifndef __ASSEMBLY__
+
+#ifdef CONFIG_DEBUG_RODATA
+
+int set_memory_rw(unsigned long virt, int numpages);
+int set_memory_ro(unsigned long virt, int numpages);
+
+void mark_rodata_ro(void);
+void set_kernel_text_rw(void);
+void set_kernel_text_ro(void);
+#else
+static inline void set_kernel_text_rw(void) { }
+static inline void set_kernel_text_ro(void) { }
+#endif
+
+#endif
+
+#endif
index d3a22bebe6ce415c952cbb631929aa1a37157ea6..c5aa088c0a8bf94079307f9708d8a009bd36d624 100644 (file)
@@ -81,6 +81,8 @@ extern void arch_send_call_function_single_ipi(int cpu);
 extern void arch_send_call_function_ipi_mask(const struct cpumask *mask);
 extern void arch_send_wakeup_ipi_mask(const struct cpumask *mask);
 
+extern void smp_send_all_cpu_backtrace(void);
+
 struct smp_operations {
 #ifdef CONFIG_SMP
        /*
index 9b6de8c988f30e67af04325c081c8d0809e9f663..a8433bdee0028165c815a965eae335f6b7b2860b 100644 (file)
@@ -15,6 +15,7 @@
 #include <linux/init.h>
 #include <linux/types.h>
 #include <linux/io.h>
+#include <linux/slab.h>
 #include <linux/sysrq.h>
 #include <linux/device.h>
 #include <linux/clk.h>
@@ -37,26 +38,37 @@ MODULE_AUTHOR("Alexander Shishkin");
 struct tracectx {
        unsigned int    etb_bufsz;
        void __iomem    *etb_regs;
-       void __iomem    *etm_regs;
+       void __iomem    **etm_regs;
+       int             etm_regs_count;
        unsigned long   flags;
        int             ncmppairs;
        int             etm_portsz;
+       int             etm_contextid_size;
+       u32             etb_fc;
+       unsigned long   range_start;
+       unsigned long   range_end;
+       unsigned long   data_range_start;
+       unsigned long   data_range_end;
+       bool            dump_initial_etb;
        struct device   *dev;
        struct clk      *emu_clk;
        struct mutex    mutex;
 };
 
-static struct tracectx tracer;
+static struct tracectx tracer = {
+       .range_start = (unsigned long)_stext,
+       .range_end = (unsigned long)_etext,
+};
 
 static inline bool trace_isrunning(struct tracectx *t)
 {
        return !!(t->flags & TRACER_RUNNING);
 }
 
-static int etm_setup_address_range(struct tracectx *t, int n,
+static int etm_setup_address_range(struct tracectx *t, int id, int n,
                unsigned long start, unsigned long end, int exclude, int data)
 {
-       u32 flags = ETMAAT_ARM | ETMAAT_IGNCONTEXTID | ETMAAT_NSONLY | \
+       u32 flags = ETMAAT_ARM | ETMAAT_IGNCONTEXTID | ETMAAT_IGNSECURITY |
                    ETMAAT_NOVALCMP;
 
        if (n < 1 || n > t->ncmppairs)
@@ -72,95 +84,185 @@ static int etm_setup_address_range(struct tracectx *t, int n,
                flags |= ETMAAT_IEXEC;
 
        /* first comparator for the range */
-       etm_writel(t, flags, ETMR_COMP_ACC_TYPE(n * 2));
-       etm_writel(t, start, ETMR_COMP_VAL(n * 2));
+       etm_writel(t, id, flags, ETMR_COMP_ACC_TYPE(n * 2));
+       etm_writel(t, id, start, ETMR_COMP_VAL(n * 2));
 
        /* second comparator is right next to it */
-       etm_writel(t, flags, ETMR_COMP_ACC_TYPE(n * 2 + 1));
-       etm_writel(t, end, ETMR_COMP_VAL(n * 2 + 1));
-
-       flags = exclude ? ETMTE_INCLEXCL : 0;
-       etm_writel(t, flags | (1 << n), ETMR_TRACEENCTRL);
+       etm_writel(t, id, flags, ETMR_COMP_ACC_TYPE(n * 2 + 1));
+       etm_writel(t, id, end, ETMR_COMP_VAL(n * 2 + 1));
+
+       if (data) {
+               flags = exclude ? ETMVDC3_EXCLONLY : 0;
+               if (exclude)
+                       n += 8;
+               etm_writel(t, id, flags | BIT(n), ETMR_VIEWDATACTRL3);
+       } else {
+               flags = exclude ? ETMTE_INCLEXCL : 0;
+               etm_writel(t, id, flags | (1 << n), ETMR_TRACEENCTRL);
+       }
 
        return 0;
 }
 
-static int trace_start(struct tracectx *t)
+static int trace_start_etm(struct tracectx *t, int id)
 {
        u32 v;
        unsigned long timeout = TRACER_TIMEOUT;
 
-       etb_unlock(t);
-
-       etb_writel(t, 0, ETBR_FORMATTERCTRL);
-       etb_writel(t, 1, ETBR_CTRL);
-
-       etb_lock(t);
-
-       /* configure etm */
        v = ETMCTRL_OPTS | ETMCTRL_PROGRAM | ETMCTRL_PORTSIZE(t->etm_portsz);
+       v |= ETMCTRL_CONTEXTIDSIZE(t->etm_contextid_size);
 
        if (t->flags & TRACER_CYCLE_ACC)
                v |= ETMCTRL_CYCLEACCURATE;
 
-       etm_unlock(t);
+       if (t->flags & TRACER_BRANCHOUTPUT)
+               v |= ETMCTRL_BRANCH_OUTPUT;
+
+       if (t->flags & TRACER_TRACE_DATA)
+               v |= ETMCTRL_DATA_DO_ADDR;
+
+       if (t->flags & TRACER_TIMESTAMP)
+               v |= ETMCTRL_TIMESTAMP_EN;
+
+       if (t->flags & TRACER_RETURN_STACK)
+               v |= ETMCTRL_RETURN_STACK_EN;
 
-       etm_writel(t, v, ETMR_CTRL);
+       etm_unlock(t, id);
 
-       while (!(etm_readl(t, ETMR_CTRL) & ETMCTRL_PROGRAM) && --timeout)
+       etm_writel(t, id, v, ETMR_CTRL);
+
+       while (!(etm_readl(t, id, ETMR_CTRL) & ETMCTRL_PROGRAM) && --timeout)
                ;
        if (!timeout) {
                dev_dbg(t->dev, "Waiting for progbit to assert timed out\n");
-               etm_lock(t);
+               etm_lock(t, id);
                return -EFAULT;
        }
 
-       etm_setup_address_range(t, 1, (unsigned long)_stext,
-                       (unsigned long)_etext, 0, 0);
-       etm_writel(t, 0, ETMR_TRACEENCTRL2);
-       etm_writel(t, 0, ETMR_TRACESSCTRL);
-       etm_writel(t, 0x6f, ETMR_TRACEENEVT);
+       if (t->range_start || t->range_end)
+               etm_setup_address_range(t, id, 1,
+                                       t->range_start, t->range_end, 0, 0);
+       else
+               etm_writel(t, id, ETMTE_INCLEXCL, ETMR_TRACEENCTRL);
+
+       etm_writel(t, id, 0, ETMR_TRACEENCTRL2);
+       etm_writel(t, id, 0, ETMR_TRACESSCTRL);
+       etm_writel(t, id, 0x6f, ETMR_TRACEENEVT);
+
+       etm_writel(t, id, 0, ETMR_VIEWDATACTRL1);
+       etm_writel(t, id, 0, ETMR_VIEWDATACTRL2);
+
+       if (t->data_range_start || t->data_range_end)
+               etm_setup_address_range(t, id, 2, t->data_range_start,
+                                       t->data_range_end, 0, 1);
+       else
+               etm_writel(t, id, ETMVDC3_EXCLONLY, ETMR_VIEWDATACTRL3);
+
+       etm_writel(t, id, 0x6f, ETMR_VIEWDATAEVT);
 
        v &= ~ETMCTRL_PROGRAM;
        v |= ETMCTRL_PORTSEL;
 
-       etm_writel(t, v, ETMR_CTRL);
+       etm_writel(t, id, v, ETMR_CTRL);
 
        timeout = TRACER_TIMEOUT;
-       while (etm_readl(t, ETMR_CTRL) & ETMCTRL_PROGRAM && --timeout)
+       while (etm_readl(t, id, ETMR_CTRL) & ETMCTRL_PROGRAM && --timeout)
                ;
        if (!timeout) {
                dev_dbg(t->dev, "Waiting for progbit to deassert timed out\n");
-               etm_lock(t);
+               etm_lock(t, id);
                return -EFAULT;
        }
 
-       etm_lock(t);
+       etm_lock(t, id);
+       return 0;
+}
+
+static int trace_start(struct tracectx *t)
+{
+       int ret;
+       int id;
+       u32 etb_fc = t->etb_fc;
+
+       etb_unlock(t);
+
+       t->dump_initial_etb = false;
+       etb_writel(t, 0, ETBR_WRITEADDR);
+       etb_writel(t, etb_fc, ETBR_FORMATTERCTRL);
+       etb_writel(t, 1, ETBR_CTRL);
+
+       etb_lock(t);
+
+       /* configure etm(s) */
+       for (id = 0; id < t->etm_regs_count; id++) {
+               ret = trace_start_etm(t, id);
+               if (ret)
+                       return ret;
+       }
 
        t->flags |= TRACER_RUNNING;
 
        return 0;
 }
 
-static int trace_stop(struct tracectx *t)
+static int trace_stop_etm(struct tracectx *t, int id)
 {
        unsigned long timeout = TRACER_TIMEOUT;
 
-       etm_unlock(t);
+       etm_unlock(t, id);
 
-       etm_writel(t, 0x440, ETMR_CTRL);
-       while (!(etm_readl(t, ETMR_CTRL) & ETMCTRL_PROGRAM) && --timeout)
+       etm_writel(t, id, 0x440, ETMR_CTRL);
+       while (!(etm_readl(t, id, ETMR_CTRL) & ETMCTRL_PROGRAM) && --timeout)
                ;
        if (!timeout) {
-               dev_dbg(t->dev, "Waiting for progbit to assert timed out\n");
-               etm_lock(t);
+               dev_err(t->dev,
+                       "etm%d: Waiting for progbit to assert timed out\n",
+                       id);
+               etm_lock(t, id);
                return -EFAULT;
        }
 
-       etm_lock(t);
+       etm_lock(t, id);
+       return 0;
+}
+
+static int trace_power_down_etm(struct tracectx *t, int id)
+{
+       unsigned long timeout = TRACER_TIMEOUT;
+       etm_unlock(t, id);
+       while (!(etm_readl(t, id, ETMR_STATUS) & ETMST_PROGBIT) && --timeout)
+               ;
+       if (!timeout) {
+               dev_err(t->dev, "etm%d: Waiting for status progbit to assert timed out\n",
+                       id);
+               etm_lock(t, id);
+               return -EFAULT;
+       }
+
+       etm_writel(t, id, 0x441, ETMR_CTRL);
+
+       etm_lock(t, id);
+       return 0;
+}
+
+static int trace_stop(struct tracectx *t)
+{
+       int id;
+       unsigned long timeout = TRACER_TIMEOUT;
+       u32 etb_fc = t->etb_fc;
+
+       for (id = 0; id < t->etm_regs_count; id++)
+               trace_stop_etm(t, id);
+
+       for (id = 0; id < t->etm_regs_count; id++)
+               trace_power_down_etm(t, id);
 
        etb_unlock(t);
-       etb_writel(t, ETBFF_MANUAL_FLUSH, ETBR_FORMATTERCTRL);
+       if (etb_fc) {
+               etb_fc |= ETBFF_STOPFL;
+               etb_writel(t, etb_fc, ETBR_FORMATTERCTRL);
+       }
+       etb_writel(t, etb_fc | ETBFF_MANUAL_FLUSH, ETBR_FORMATTERCTRL);
 
        timeout = TRACER_TIMEOUT;
        while (etb_readl(t, ETBR_FORMATTERCTRL) &
@@ -185,24 +287,15 @@ static int trace_stop(struct tracectx *t)
 static int etb_getdatalen(struct tracectx *t)
 {
        u32 v;
-       int rp, wp;
+       int wp;
 
        v = etb_readl(t, ETBR_STATUS);
 
        if (v & 1)
                return t->etb_bufsz;
 
-       rp = etb_readl(t, ETBR_READADDR);
        wp = etb_readl(t, ETBR_WRITEADDR);
-
-       if (rp > wp) {
-               etb_writel(t, 0, ETBR_READADDR);
-               etb_writel(t, 0, ETBR_WRITEADDR);
-
-               return 0;
-       }
-
-       return wp - rp;
+       return wp;
 }
 
 /* sysrq+v will always stop the running trace and leave it at that */
@@ -235,21 +328,18 @@ static void etm_dump(void)
                printk("%08x", cpu_to_be32(etb_readl(t, ETBR_READMEM)));
        printk(KERN_INFO "\n--- ETB buffer end ---\n");
 
-       /* deassert the overflow bit */
-       etb_writel(t, 1, ETBR_CTRL);
-       etb_writel(t, 0, ETBR_CTRL);
-
-       etb_writel(t, 0, ETBR_TRIGGERCOUNT);
-       etb_writel(t, 0, ETBR_READADDR);
-       etb_writel(t, 0, ETBR_WRITEADDR);
-
        etb_lock(t);
 }
 
 static void sysrq_etm_dump(int key)
 {
+       if (!mutex_trylock(&tracer.mutex)) {
+               printk(KERN_INFO "Tracing hardware busy\n");
+               return;
+       }
        dev_dbg(tracer.dev, "Dumping ETB buffer\n");
        etm_dump();
+       mutex_unlock(&tracer.mutex);
 }
 
 static struct sysrq_key_op sysrq_etm_op = {
@@ -276,6 +366,10 @@ static ssize_t etb_read(struct file *file, char __user *data,
        struct tracectx *t = file->private_data;
        u32 first = 0;
        u32 *buf;
+       int wpos;
+       int skip;
+       long wlength;
+       loff_t pos = *ppos;
 
        mutex_lock(&t->mutex);
 
@@ -287,31 +381,39 @@ static ssize_t etb_read(struct file *file, char __user *data,
        etb_unlock(t);
 
        total = etb_getdatalen(t);
+       if (total == 0 && t->dump_initial_etb)
+               total = t->etb_bufsz;
        if (total == t->etb_bufsz)
                first = etb_readl(t, ETBR_WRITEADDR);
 
+       if (pos > total * 4) {
+               skip = 0;
+               wpos = total;
+       } else {
+               skip = (int)pos % 4;
+               wpos = (int)pos / 4;
+       }
+       total -= wpos;
+       first = (first + wpos) % t->etb_bufsz;
+
        etb_writel(t, first, ETBR_READADDR);
 
-       length = min(total * 4, (int)len);
-       buf = vmalloc(length);
+       wlength = min(total, DIV_ROUND_UP(skip + (int)len, 4));
+       length = min(total * 4 - skip, (int)len);
+       buf = vmalloc(wlength * 4);
 
-       dev_dbg(t->dev, "ETB buffer length: %d\n", total);
+       dev_dbg(t->dev, "ETB read %ld bytes to %lld from %ld words at %d\n",
+               length, pos, wlength, first);
+       dev_dbg(t->dev, "ETB buffer length: %d\n", total + wpos);
        dev_dbg(t->dev, "ETB status reg: %x\n", etb_readl(t, ETBR_STATUS));
-       for (i = 0; i < length / 4; i++)
+       for (i = 0; i < wlength; i++)
                buf[i] = etb_readl(t, ETBR_READMEM);
 
-       /* the only way to deassert overflow bit in ETB status is this */
-       etb_writel(t, 1, ETBR_CTRL);
-       etb_writel(t, 0, ETBR_CTRL);
-
-       etb_writel(t, 0, ETBR_WRITEADDR);
-       etb_writel(t, 0, ETBR_READADDR);
-       etb_writel(t, 0, ETBR_TRIGGERCOUNT);
-
        etb_lock(t);
 
-       length -= copy_to_user(data, buf, length);
+       length -= copy_to_user(data, (u8 *)buf + skip, length);
        vfree(buf);
+       *ppos = pos + length;
 
 out:
        mutex_unlock(&t->mutex);
@@ -348,28 +450,17 @@ static int etb_probe(struct amba_device *dev, const struct amba_id *id)
        if (ret)
                goto out;
 
+       mutex_lock(&t->mutex);
        t->etb_regs = ioremap_nocache(dev->res.start, resource_size(&dev->res));
        if (!t->etb_regs) {
                ret = -ENOMEM;
                goto out_release;
        }
 
+       t->dev = &dev->dev;
+       t->dump_initial_etb = true;
        amba_set_drvdata(dev, t);
 
-       etb_miscdev.parent = &dev->dev;
-
-       ret = misc_register(&etb_miscdev);
-       if (ret)
-               goto out_unmap;
-
-       t->emu_clk = clk_get(&dev->dev, "emu_src_ck");
-       if (IS_ERR(t->emu_clk)) {
-               dev_dbg(&dev->dev, "Failed to obtain emu_src_ck.\n");
-               return -EFAULT;
-       }
-
-       clk_enable(t->emu_clk);
-
        etb_unlock(t);
        t->etb_bufsz = etb_readl(t, ETBR_DEPTH);
        dev_dbg(&dev->dev, "Size: %x\n", t->etb_bufsz);
@@ -378,6 +469,20 @@ static int etb_probe(struct amba_device *dev, const struct amba_id *id)
        etb_writel(t, 0, ETBR_CTRL);
        etb_writel(t, 0x1000, ETBR_FORMATTERCTRL);
        etb_lock(t);
+       mutex_unlock(&t->mutex);
+
+       etb_miscdev.parent = &dev->dev;
+
+       ret = misc_register(&etb_miscdev);
+       if (ret)
+               goto out_unmap;
+
+       /* Get optional clock. Currently used to select clock source on omap3 */
+       t->emu_clk = clk_get(&dev->dev, "emu_src_ck");
+       if (IS_ERR(t->emu_clk))
+               dev_dbg(&dev->dev, "Failed to obtain emu_src_ck.\n");
+       else
+               clk_enable(t->emu_clk);
 
        dev_dbg(&dev->dev, "ETB AMBA driver initialized.\n");
 
@@ -385,10 +490,13 @@ out:
        return ret;
 
 out_unmap:
+       mutex_lock(&t->mutex);
        amba_set_drvdata(dev, NULL);
        iounmap(t->etb_regs);
+       t->etb_regs = NULL;
 
 out_release:
+       mutex_unlock(&t->mutex);
        amba_release_regions(dev);
 
        return ret;
@@ -403,8 +511,10 @@ static int etb_remove(struct amba_device *dev)
        iounmap(t->etb_regs);
        t->etb_regs = NULL;
 
-       clk_disable(t->emu_clk);
-       clk_put(t->emu_clk);
+       if (!IS_ERR(t->emu_clk)) {
+               clk_disable(t->emu_clk);
+               clk_put(t->emu_clk);
+       }
 
        amba_release_regions(dev);
 
@@ -448,7 +558,10 @@ static ssize_t trace_running_store(struct kobject *kobj,
                return -EINVAL;
 
        mutex_lock(&tracer.mutex);
-       ret = value ? trace_start(&tracer) : trace_stop(&tracer);
+       if (!tracer.etb_regs)
+               ret = -ENODEV;
+       else
+               ret = value ? trace_start(&tracer) : trace_stop(&tracer);
        mutex_unlock(&tracer.mutex);
 
        return ret ? : n;
@@ -463,36 +576,50 @@ static ssize_t trace_info_show(struct kobject *kobj,
 {
        u32 etb_wa, etb_ra, etb_st, etb_fc, etm_ctrl, etm_st;
        int datalen;
+       int id;
+       int ret;
 
-       etb_unlock(&tracer);
-       datalen = etb_getdatalen(&tracer);
-       etb_wa = etb_readl(&tracer, ETBR_WRITEADDR);
-       etb_ra = etb_readl(&tracer, ETBR_READADDR);
-       etb_st = etb_readl(&tracer, ETBR_STATUS);
-       etb_fc = etb_readl(&tracer, ETBR_FORMATTERCTRL);
-       etb_lock(&tracer);
-
-       etm_unlock(&tracer);
-       etm_ctrl = etm_readl(&tracer, ETMR_CTRL);
-       etm_st = etm_readl(&tracer, ETMR_STATUS);
-       etm_lock(&tracer);
+       mutex_lock(&tracer.mutex);
+       if (tracer.etb_regs) {
+               etb_unlock(&tracer);
+               datalen = etb_getdatalen(&tracer);
+               etb_wa = etb_readl(&tracer, ETBR_WRITEADDR);
+               etb_ra = etb_readl(&tracer, ETBR_READADDR);
+               etb_st = etb_readl(&tracer, ETBR_STATUS);
+               etb_fc = etb_readl(&tracer, ETBR_FORMATTERCTRL);
+               etb_lock(&tracer);
+       } else {
+               etb_wa = etb_ra = etb_st = etb_fc = ~0;
+               datalen = -1;
+       }
 
-       return sprintf(buf, "Trace buffer len: %d\nComparator pairs: %d\n"
+       ret = sprintf(buf, "Trace buffer len: %d\nComparator pairs: %d\n"
                        "ETBR_WRITEADDR:\t%08x\n"
                        "ETBR_READADDR:\t%08x\n"
                        "ETBR_STATUS:\t%08x\n"
-                       "ETBR_FORMATTERCTRL:\t%08x\n"
-                       "ETMR_CTRL:\t%08x\n"
-                       "ETMR_STATUS:\t%08x\n",
+                       "ETBR_FORMATTERCTRL:\t%08x\n",
                        datalen,
                        tracer.ncmppairs,
                        etb_wa,
                        etb_ra,
                        etb_st,
-                       etb_fc,
+                       etb_fc
+                       );
+
+       for (id = 0; id < tracer.etm_regs_count; id++) {
+               etm_unlock(&tracer, id);
+               etm_ctrl = etm_readl(&tracer, id, ETMR_CTRL);
+               etm_st = etm_readl(&tracer, id, ETMR_STATUS);
+               etm_lock(&tracer, id);
+               ret += sprintf(buf + ret, "ETMR_CTRL:\t%08x\n"
+                       "ETMR_STATUS:\t%08x\n",
                        etm_ctrl,
                        etm_st
                        );
+       }
+       mutex_unlock(&tracer.mutex);
+
+       return ret;
 }
 
 static struct kobj_attribute trace_info_attr =
@@ -531,42 +658,260 @@ static ssize_t trace_mode_store(struct kobject *kobj,
 static struct kobj_attribute trace_mode_attr =
        __ATTR(trace_mode, 0644, trace_mode_show, trace_mode_store);
 
+static ssize_t trace_contextid_size_show(struct kobject *kobj,
+                                        struct kobj_attribute *attr,
+                                        char *buf)
+{
+       /* 0: No context id tracing, 1: One byte, 2: Two bytes, 3: Four bytes */
+       return sprintf(buf, "%d\n", (1 << tracer.etm_contextid_size) >> 1);
+}
+
+static ssize_t trace_contextid_size_store(struct kobject *kobj,
+                                         struct kobj_attribute *attr,
+                                         const char *buf, size_t n)
+{
+       unsigned int contextid_size;
+
+       if (sscanf(buf, "%u", &contextid_size) != 1)
+               return -EINVAL;
+
+       if (contextid_size == 3 || contextid_size > 4)
+               return -EINVAL;
+
+       mutex_lock(&tracer.mutex);
+       tracer.etm_contextid_size = fls(contextid_size);
+       mutex_unlock(&tracer.mutex);
+
+       return n;
+}
+
+static struct kobj_attribute trace_contextid_size_attr =
+       __ATTR(trace_contextid_size, 0644,
+               trace_contextid_size_show, trace_contextid_size_store);
+
+static ssize_t trace_branch_output_show(struct kobject *kobj,
+                                       struct kobj_attribute *attr,
+                                       char *buf)
+{
+       return sprintf(buf, "%d\n", !!(tracer.flags & TRACER_BRANCHOUTPUT));
+}
+
+static ssize_t trace_branch_output_store(struct kobject *kobj,
+                                        struct kobj_attribute *attr,
+                                        const char *buf, size_t n)
+{
+       unsigned int branch_output;
+
+       if (sscanf(buf, "%u", &branch_output) != 1)
+               return -EINVAL;
+
+       mutex_lock(&tracer.mutex);
+       if (branch_output) {
+               tracer.flags |= TRACER_BRANCHOUTPUT;
+               /* Branch broadcasting is incompatible with the return stack */
+               tracer.flags &= ~TRACER_RETURN_STACK;
+       } else {
+               tracer.flags &= ~TRACER_BRANCHOUTPUT;
+       }
+       mutex_unlock(&tracer.mutex);
+
+       return n;
+}
+
+static struct kobj_attribute trace_branch_output_attr =
+       __ATTR(trace_branch_output, 0644,
+               trace_branch_output_show, trace_branch_output_store);
+
+static ssize_t trace_return_stack_show(struct kobject *kobj,
+                                 struct kobj_attribute *attr,
+                                 char *buf)
+{
+       return sprintf(buf, "%d\n", !!(tracer.flags & TRACER_RETURN_STACK));
+}
+
+static ssize_t trace_return_stack_store(struct kobject *kobj,
+                                  struct kobj_attribute *attr,
+                                  const char *buf, size_t n)
+{
+       unsigned int return_stack;
+
+       if (sscanf(buf, "%u", &return_stack) != 1)
+               return -EINVAL;
+
+       mutex_lock(&tracer.mutex);
+       if (return_stack) {
+               tracer.flags |= TRACER_RETURN_STACK;
+               /* Return stack is incompatible with branch broadcasting */
+               tracer.flags &= ~TRACER_BRANCHOUTPUT;
+       } else {
+               tracer.flags &= ~TRACER_RETURN_STACK;
+       }
+       mutex_unlock(&tracer.mutex);
+
+       return n;
+}
+
+static struct kobj_attribute trace_return_stack_attr =
+       __ATTR(trace_return_stack, 0644,
+               trace_return_stack_show, trace_return_stack_store);
+
+static ssize_t trace_timestamp_show(struct kobject *kobj,
+                                 struct kobj_attribute *attr,
+                                 char *buf)
+{
+       return sprintf(buf, "%d\n", !!(tracer.flags & TRACER_TIMESTAMP));
+}
+
+static ssize_t trace_timestamp_store(struct kobject *kobj,
+                                  struct kobj_attribute *attr,
+                                  const char *buf, size_t n)
+{
+       unsigned int timestamp;
+
+       if (sscanf(buf, "%u", &timestamp) != 1)
+               return -EINVAL;
+
+       mutex_lock(&tracer.mutex);
+       if (timestamp)
+               tracer.flags |= TRACER_TIMESTAMP;
+       else
+               tracer.flags &= ~TRACER_TIMESTAMP;
+       mutex_unlock(&tracer.mutex);
+
+       return n;
+}
+
+static struct kobj_attribute trace_timestamp_attr =
+       __ATTR(trace_timestamp, 0644,
+               trace_timestamp_show, trace_timestamp_store);
+
+static ssize_t trace_range_show(struct kobject *kobj,
+                                 struct kobj_attribute *attr,
+                                 char *buf)
+{
+       return sprintf(buf, "%08lx %08lx\n",
+                       tracer.range_start, tracer.range_end);
+}
+
+static ssize_t trace_range_store(struct kobject *kobj,
+                                  struct kobj_attribute *attr,
+                                  const char *buf, size_t n)
+{
+       unsigned long range_start, range_end;
+
+       if (sscanf(buf, "%lx %lx", &range_start, &range_end) != 2)
+               return -EINVAL;
+
+       mutex_lock(&tracer.mutex);
+       tracer.range_start = range_start;
+       tracer.range_end = range_end;
+       mutex_unlock(&tracer.mutex);
+
+       return n;
+}
+
+static struct kobj_attribute trace_range_attr =
+       __ATTR(trace_range, 0644, trace_range_show, trace_range_store);
+
+static ssize_t trace_data_range_show(struct kobject *kobj,
+                                 struct kobj_attribute *attr,
+                                 char *buf)
+{
+       unsigned long range_start;
+       u64 range_end;
+       mutex_lock(&tracer.mutex);
+       range_start = tracer.data_range_start;
+       range_end = tracer.data_range_end;
+       if (!range_end && (tracer.flags & TRACER_TRACE_DATA))
+               range_end = 0x100000000ULL;
+       mutex_unlock(&tracer.mutex);
+       return sprintf(buf, "%08lx %08llx\n", range_start, range_end);
+}
+
+static ssize_t trace_data_range_store(struct kobject *kobj,
+                                  struct kobj_attribute *attr,
+                                  const char *buf, size_t n)
+{
+       unsigned long range_start;
+       u64 range_end;
+
+       if (sscanf(buf, "%lx %llx", &range_start, &range_end) != 2)
+               return -EINVAL;
+
+       mutex_lock(&tracer.mutex);
+       tracer.data_range_start = range_start;
+       tracer.data_range_end = (unsigned long)range_end;
+       if (range_end)
+               tracer.flags |= TRACER_TRACE_DATA;
+       else
+               tracer.flags &= ~TRACER_TRACE_DATA;
+       mutex_unlock(&tracer.mutex);
+
+       return n;
+}
+
+static struct kobj_attribute trace_data_range_attr =
+       __ATTR(trace_data_range, 0644,
+               trace_data_range_show, trace_data_range_store);
+
 static int etm_probe(struct amba_device *dev, const struct amba_id *id)
 {
        struct tracectx *t = &tracer;
        int ret = 0;
+       void __iomem **new_regs;
+       int new_count;
+       u32 etmccr;
+       u32 etmidr;
+       u32 etmccer = 0;
+       u8 etm_version = 0;
+
+       mutex_lock(&t->mutex);
+       new_count = t->etm_regs_count + 1;
+       new_regs = krealloc(t->etm_regs,
+                               sizeof(t->etm_regs[0]) * new_count, GFP_KERNEL);
 
-       if (t->etm_regs) {
-               dev_dbg(&dev->dev, "ETM already initialized\n");
-               ret = -EBUSY;
+       if (!new_regs) {
+               dev_dbg(&dev->dev, "Failed to allocate ETM register array\n");
+               ret = -ENOMEM;
                goto out;
        }
+       t->etm_regs = new_regs;
 
        ret = amba_request_regions(dev, NULL);
        if (ret)
                goto out;
 
-       t->etm_regs = ioremap_nocache(dev->res.start, resource_size(&dev->res));
-       if (!t->etm_regs) {
+       t->etm_regs[t->etm_regs_count] =
+               ioremap_nocache(dev->res.start, resource_size(&dev->res));
+       if (!t->etm_regs[t->etm_regs_count]) {
                ret = -ENOMEM;
                goto out_release;
        }
 
-       amba_set_drvdata(dev, t);
+       amba_set_drvdata(dev, t->etm_regs[t->etm_regs_count]);
 
-       mutex_init(&t->mutex);
-       t->dev = &dev->dev;
-       t->flags = TRACER_CYCLE_ACC;
+       t->flags = TRACER_CYCLE_ACC | TRACER_TRACE_DATA | TRACER_BRANCHOUTPUT;
        t->etm_portsz = 1;
+       t->etm_contextid_size = 3;
 
-       etm_unlock(t);
-       (void)etm_readl(t, ETMMR_PDSR);
+       etm_unlock(t, t->etm_regs_count);
+       (void)etm_readl(t, t->etm_regs_count, ETMMR_PDSR);
        /* dummy first read */
-       (void)etm_readl(&tracer, ETMMR_OSSRR);
-
-       t->ncmppairs = etm_readl(t, ETMR_CONFCODE) & 0xf;
-       etm_writel(t, 0x440, ETMR_CTRL);
-       etm_lock(t);
+       (void)etm_readl(&tracer, t->etm_regs_count, ETMMR_OSSRR);
+
+       etmccr = etm_readl(t, t->etm_regs_count, ETMR_CONFCODE);
+       t->ncmppairs = etmccr & 0xf;
+       if (etmccr & ETMCCR_ETMIDR_PRESENT) {
+               etmidr = etm_readl(t, t->etm_regs_count, ETMR_ID);
+               etm_version = ETMIDR_VERSION(etmidr);
+               if (etm_version >= ETMIDR_VERSION_3_1)
+                       etmccer = etm_readl(t, t->etm_regs_count, ETMR_CCE);
+       }
+       etm_writel(t, t->etm_regs_count, 0x441, ETMR_CTRL);
+       etm_writel(t, t->etm_regs_count, new_count, ETMR_TRACEIDR);
+       etm_lock(t, t->etm_regs_count);
 
        ret = sysfs_create_file(&dev->dev.kobj,
                        &trace_running_attr.attr);
@@ -582,36 +927,101 @@ static int etm_probe(struct amba_device *dev, const struct amba_id *id)
        if (ret)
                dev_dbg(&dev->dev, "Failed to create trace_mode in sysfs\n");
 
-       dev_dbg(t->dev, "ETM AMBA driver initialized.\n");
+       ret = sysfs_create_file(&dev->dev.kobj,
+                               &trace_contextid_size_attr.attr);
+       if (ret)
+               dev_dbg(&dev->dev,
+                       "Failed to create trace_contextid_size in sysfs\n");
+
+       ret = sysfs_create_file(&dev->dev.kobj,
+                               &trace_branch_output_attr.attr);
+       if (ret)
+               dev_dbg(&dev->dev,
+                       "Failed to create trace_branch_output in sysfs\n");
+
+       if (etmccer & ETMCCER_RETURN_STACK_IMPLEMENTED) {
+               ret = sysfs_create_file(&dev->dev.kobj,
+                                       &trace_return_stack_attr.attr);
+               if (ret)
+                       dev_dbg(&dev->dev,
+                             "Failed to create trace_return_stack in sysfs\n");
+       }
+
+       if (etmccer & ETMCCER_TIMESTAMPING_IMPLEMENTED) {
+               ret = sysfs_create_file(&dev->dev.kobj,
+                                       &trace_timestamp_attr.attr);
+               if (ret)
+                       dev_dbg(&dev->dev,
+                               "Failed to create trace_timestamp in sysfs\n");
+       }
+
+       ret = sysfs_create_file(&dev->dev.kobj, &trace_range_attr.attr);
+       if (ret)
+               dev_dbg(&dev->dev, "Failed to create trace_range in sysfs\n");
+
+       if (etm_version < ETMIDR_VERSION_PFT_1_0) {
+               ret = sysfs_create_file(&dev->dev.kobj,
+                                       &trace_data_range_attr.attr);
+               if (ret)
+                       dev_dbg(&dev->dev,
+                               "Failed to create trace_data_range in sysfs\n");
+       } else {
+               tracer.flags &= ~TRACER_TRACE_DATA;
+       }
+
+       dev_dbg(&dev->dev, "ETM AMBA driver initialized.\n");
+
+       /* Enable formatter if there are multiple trace sources */
+       if (new_count > 1)
+               t->etb_fc = ETBFF_ENFCONT | ETBFF_ENFTC;
+
+       t->etm_regs_count = new_count;
 
 out:
+       mutex_unlock(&t->mutex);
        return ret;
 
 out_unmap:
        amba_set_drvdata(dev, NULL);
-       iounmap(t->etm_regs);
+       iounmap(t->etm_regs[t->etm_regs_count]);
 
 out_release:
        amba_release_regions(dev);
 
+       mutex_unlock(&t->mutex);
        return ret;
 }
 
 static int etm_remove(struct amba_device *dev)
 {
-       struct tracectx *t = amba_get_drvdata(dev);
+       int i;
+       struct tracectx *t = &tracer;
+       void __iomem    *etm_regs = amba_get_drvdata(dev);
+
+       sysfs_remove_file(&dev->dev.kobj, &trace_running_attr.attr);
+       sysfs_remove_file(&dev->dev.kobj, &trace_info_attr.attr);
+       sysfs_remove_file(&dev->dev.kobj, &trace_mode_attr.attr);
+       sysfs_remove_file(&dev->dev.kobj, &trace_range_attr.attr);
+       sysfs_remove_file(&dev->dev.kobj, &trace_data_range_attr.attr);
 
        amba_set_drvdata(dev, NULL);
 
-       iounmap(t->etm_regs);
-       t->etm_regs = NULL;
+       mutex_lock(&t->mutex);
+       for (i = 0; i < t->etm_regs_count; i++)
+               if (t->etm_regs[i] == etm_regs)
+                       break;
+       for (; i < t->etm_regs_count - 1; i++)
+               t->etm_regs[i] = t->etm_regs[i + 1];
+       t->etm_regs_count--;
+       if (!t->etm_regs_count) {
+               kfree(t->etm_regs);
+               t->etm_regs = NULL;
+       }
+       mutex_unlock(&t->mutex);
 
+       iounmap(etm_regs);
        amba_release_regions(dev);
 
-       sysfs_remove_file(&dev->dev.kobj, &trace_running_attr.attr);
-       sysfs_remove_file(&dev->dev.kobj, &trace_info_attr.attr);
-       sysfs_remove_file(&dev->dev.kobj, &trace_mode_attr.attr);
-
        return 0;
 }
 
@@ -620,6 +1030,10 @@ static struct amba_id etm_ids[] = {
                .id     = 0x0003b921,
                .mask   = 0x0007ffff,
        },
+       {
+               .id     = 0x0003b950,
+               .mask   = 0x0007ffff,
+       },
        { 0, 0 },
 };
 
@@ -637,6 +1051,8 @@ static int __init etm_init(void)
 {
        int retval;
 
+       mutex_init(&tracer.mutex);
+
        retval = amba_driver_register(&etb_driver);
        if (retval) {
                printk(KERN_ERR "Failed to register etb\n");
index 34e56647dceeee88d99f65d5fd0a6e00fb46a0fd..6a740a93f4bb3c293db3b9ea838101e0ec880474 100644
@@ -13,6 +13,7 @@
  */
 
 #include <linux/ftrace.h>
+#include <linux/module.h>
 #include <linux/uaccess.h>
 
 #include <asm/cacheflush.h>
@@ -63,6 +64,20 @@ static unsigned long adjust_address(struct dyn_ftrace *rec, unsigned long addr)
 }
 #endif
 
+int ftrace_arch_code_modify_prepare(void)
+{
+       set_kernel_text_rw();
+       set_all_modules_text_rw();
+       return 0;
+}
+
+int ftrace_arch_code_modify_post_process(void)
+{
+       set_all_modules_text_ro();
+       set_kernel_text_ro();
+       return 0;
+}
+
 static unsigned long ftrace_call_replace(unsigned long pc, unsigned long addr)
 {
        return arm_gen_branch_link(pc, addr);
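[For context on the two hooks added above: a condensed sketch of the contract they fulfil. The surrounding function is a simplification of what the generic ftrace core does around code patching, not its actual code.]

    /* Simplified shape of the generic patching sequence: the arch hooks
     * open a write window over kernel and module text, the core rewrites
     * mcount call sites, then the window is closed in reverse order. */
    static int modify_all_code_sketch(void)
    {
            int ret = ftrace_arch_code_modify_prepare();    /* text -> rw */
            if (ret)
                    return ret;
            /* ... rewrite mcount call sites into branches here ... */
            return ftrace_arch_code_modify_post_process();  /* text -> ro */
    }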
index c6dec5fc20aa42440ce7267766f257449550550a..269732ee3fdaf6af6876358be5f7b9c2302b6245 100644
@@ -32,6 +32,7 @@
 #include <linux/hw_breakpoint.h>
 #include <linux/cpuidle.h>
 #include <linux/leds.h>
+#include <linux/console.h>
 
 #include <asm/cacheflush.h>
 #include <asm/idmap.h>
@@ -59,6 +60,18 @@ static const char *isa_modes[] = {
 
 static volatile int hlt_counter;
 
+#ifdef CONFIG_SMP
+void arch_trigger_all_cpu_backtrace(void)
+{
+       smp_send_all_cpu_backtrace();
+}
+#else
+void arch_trigger_all_cpu_backtrace(void)
+{
+       dump_stack();
+}
+#endif
+
 void disable_hlt(void)
 {
        hlt_counter++;
@@ -92,6 +105,31 @@ __setup("hlt", hlt_setup);
 extern void call_with_stack(void (*fn)(void *), void *arg, void *sp);
 typedef void (*phys_reset_t)(unsigned long);
 
+#ifdef CONFIG_ARM_FLUSH_CONSOLE_ON_RESTART
+void arm_machine_flush_console(void)
+{
+       printk("\n");
+       pr_emerg("Restarting %s\n", linux_banner);
+       if (console_trylock()) {
+               console_unlock();
+               return;
+       }
+
+       mdelay(50);
+
+       local_irq_disable();
+       if (!console_trylock())
+               pr_emerg("arm_restart: Console was locked! Busting\n");
+       else
+               pr_emerg("arm_restart: Console was locked!\n");
+       console_unlock();
+}
+#else
+void arm_machine_flush_console(void)
+{
+}
+#endif
+
 /*
  * A temporary stack to use for CPU reset. This is static so that we
  * don't clobber it with the identity mapping. When running with this
@@ -187,6 +225,7 @@ void cpu_idle(void)
 
        /* endless idle loop with no priority at all */
        while (1) {
+               idle_notifier_call_chain(IDLE_START);
                tick_nohz_idle_enter();
                rcu_idle_enter();
                ledtrig_cpu(CPU_LED_IDLE_START);
@@ -223,6 +262,7 @@ void cpu_idle(void)
                ledtrig_cpu(CPU_LED_IDLE_END);
                rcu_idle_exit();
                tick_nohz_idle_exit();
+               idle_notifier_call_chain(IDLE_END);
                schedule_preempt_disabled();
        }
 }
@@ -240,6 +280,15 @@ __setup("reboot=", reboot_setup);
 void machine_shutdown(void)
 {
 #ifdef CONFIG_SMP
+       /*
+        * Disable preemption so we're guaranteed to
+        * run to power off or reboot and prevent
+        * the possibility of switching to another
+        * thread that might wind up blocking on
+        * one of the stopped CPUs.
+        */
+       preempt_disable();
+
        smp_send_stop();
 #endif
 }
@@ -262,6 +311,10 @@ void machine_restart(char *cmd)
 {
        machine_shutdown();
 
+       /* Flush the console to make sure all the relevant messages make it
+        * out to the console drivers */
+       arm_machine_flush_console();
+
        arm_pm_restart(reboot_mode, cmd);
 
        /* Give a grace period for failure to restart of 1s */
@@ -273,6 +326,77 @@ void machine_restart(char *cmd)
        while (1);
 }
 
+/*
+ * dump a block of kernel memory from around the given address
+ */
+static void show_data(unsigned long addr, int nbytes, const char *name)
+{
+       int     i, j;
+       int     nlines;
+       u32     *p;
+
+       /*
+        * don't attempt to dump non-kernel addresses or
+        * values that are probably just small negative numbers
+        */
+       if (addr < PAGE_OFFSET || addr > -256UL)
+               return;
+
+       printk("\n%s: %#lx:\n", name, addr);
+
+       /*
+        * round address down to a 32 bit boundary
+        * and always dump a multiple of 32 bytes
+        */
+       p = (u32 *)(addr & ~(sizeof(u32) - 1));
+       nbytes += (addr & (sizeof(u32) - 1));
+       nlines = (nbytes + 31) / 32;
+
+       for (i = 0; i < nlines; i++) {
+               /*
+                * just display low 16 bits of address to keep
+                * each line of the dump < 80 characters
+                */
+               printk("%04lx ", (unsigned long)p & 0xffff);
+               for (j = 0; j < 8; j++) {
+                       u32     data;
+                       if (probe_kernel_address(p, data)) {
+                               printk(" ********");
+                       } else {
+                               printk(" %08x", data);
+                       }
+                       ++p;
+               }
+               printk("\n");
+       }
+}
+
+static void show_extra_register_data(struct pt_regs *regs, int nbytes)
+{
+       mm_segment_t fs;
+
+       fs = get_fs();
+       set_fs(KERNEL_DS);
+       show_data(regs->ARM_pc - nbytes, nbytes * 2, "PC");
+       show_data(regs->ARM_lr - nbytes, nbytes * 2, "LR");
+       show_data(regs->ARM_sp - nbytes, nbytes * 2, "SP");
+       show_data(regs->ARM_ip - nbytes, nbytes * 2, "IP");
+       show_data(regs->ARM_fp - nbytes, nbytes * 2, "FP");
+       show_data(regs->ARM_r0 - nbytes, nbytes * 2, "R0");
+       show_data(regs->ARM_r1 - nbytes, nbytes * 2, "R1");
+       show_data(regs->ARM_r2 - nbytes, nbytes * 2, "R2");
+       show_data(regs->ARM_r3 - nbytes, nbytes * 2, "R3");
+       show_data(regs->ARM_r4 - nbytes, nbytes * 2, "R4");
+       show_data(regs->ARM_r5 - nbytes, nbytes * 2, "R5");
+       show_data(regs->ARM_r6 - nbytes, nbytes * 2, "R6");
+       show_data(regs->ARM_r7 - nbytes, nbytes * 2, "R7");
+       show_data(regs->ARM_r8 - nbytes, nbytes * 2, "R8");
+       show_data(regs->ARM_r9 - nbytes, nbytes * 2, "R9");
+       show_data(regs->ARM_r10 - nbytes, nbytes * 2, "R10");
+       set_fs(fs);
+}
+
 void __show_regs(struct pt_regs *regs)
 {
        unsigned long flags;
@@ -332,6 +456,8 @@ void __show_regs(struct pt_regs *regs)
                printk("Control: %08x%s\n", ctrl, buf);
        }
 #endif
+
+       show_extra_register_data(regs, 128);
 }
 
 void show_regs(struct pt_regs * regs)
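[The cpu_idle() hunks above route the idle loop through the now-generic idle notifier chain. A hypothetical consumer, assuming the generic IDLE_START/IDLE_END constants and idle_notifier_register() end up in <linux/cpu.h> as in the Android common kernel:]

    #include <linux/notifier.h>
    #include <linux/cpu.h>

    static int demo_idle_notify(struct notifier_block *nb,
                                unsigned long val, void *data)
    {
            switch (val) {
            case IDLE_START:
                    /* e.g. drop a bus/latency vote while the CPU idles */
                    break;
            case IDLE_END:
                    /* e.g. reassert the vote before real work resumes */
                    break;
            }
            return NOTIFY_OK;
    }

    static struct notifier_block demo_idle_nb = {
            .notifier_call = demo_idle_notify,
    };

    /* in some driver's init path: idle_notifier_register(&demo_idle_nb); */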
index 650a7cfbd52cfef555381d7b866a564de54071f8..b0070652243d264d40d6a4d51e176a7b1220536b 100644
@@ -66,6 +66,7 @@ enum ipi_msg_type {
        IPI_CALL_FUNC,
        IPI_CALL_FUNC_SINGLE,
        IPI_CPU_STOP,
+       IPI_CPU_BACKTRACE,
 };
 
 static DECLARE_COMPLETION(cpu_running);
@@ -445,6 +446,7 @@ static const char *ipi_types[NR_IPI] = {
        S(IPI_CALL_FUNC, "Function call interrupts"),
        S(IPI_CALL_FUNC_SINGLE, "Single function call interrupts"),
        S(IPI_CPU_STOP, "CPU stop interrupts"),
+       S(IPI_CPU_BACKTRACE, "CPU backtrace"),
 };
 
 void show_ipi_list(struct seq_file *p, int prec)
@@ -579,6 +581,58 @@ static void ipi_cpu_stop(unsigned int cpu)
                cpu_relax();
 }
 
+static cpumask_t backtrace_mask;
+static DEFINE_RAW_SPINLOCK(backtrace_lock);
+
+/* "in progress" flag of arch_trigger_all_cpu_backtrace */
+static unsigned long backtrace_flag;
+
+void smp_send_all_cpu_backtrace(void)
+{
+       unsigned int this_cpu = smp_processor_id();
+       int i;
+
+       if (test_and_set_bit(0, &backtrace_flag))
+               /*
+                * If there is already a trigger_all_cpu_backtrace() in progress
+                * (backtrace_flag == 1), don't print a second set of CPU dumps.
+                */
+               return;
+
+       cpumask_copy(&backtrace_mask, cpu_online_mask);
+       cpu_clear(this_cpu, backtrace_mask);
+
+       pr_info("Backtrace for cpu %d (current):\n", this_cpu);
+       dump_stack();
+
+       pr_info("\nsending IPI to all other CPUs:\n");
+       smp_cross_call(&backtrace_mask, IPI_CPU_BACKTRACE);
+
+       /* Wait for up to 10 seconds for all other CPUs to do the backtrace */
+       for (i = 0; i < 10 * 1000; i++) {
+               if (cpumask_empty(&backtrace_mask))
+                       break;
+               mdelay(1);
+       }
+
+       clear_bit(0, &backtrace_flag);
+       smp_mb__after_clear_bit();
+}
+
+/*
+ * ipi_cpu_backtrace - handle IPI from smp_send_all_cpu_backtrace()
+ */
+static void ipi_cpu_backtrace(unsigned int cpu, struct pt_regs *regs)
+{
+       if (cpu_isset(cpu, backtrace_mask)) {
+               raw_spin_lock(&backtrace_lock);
+               pr_warning("IPI backtrace for cpu %d\n", cpu);
+               show_regs(regs);
+               raw_spin_unlock(&backtrace_lock);
+               cpu_clear(cpu, backtrace_mask);
+       }
+}
+
 /*
  * Main handler for inter-processor interrupts
  */
@@ -627,6 +681,10 @@ void handle_IPI(int ipinr, struct pt_regs *regs)
                irq_exit();
                break;
 
+       case IPI_CPU_BACKTRACE:
+               ipi_cpu_backtrace(cpu, regs);
+               break;
+
        default:
                printk(KERN_CRIT "CPU%u: Unknown IPI message 0x%x\n",
                       cpu, ipinr);
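[The serialization in smp_send_all_cpu_backtrace() above is a reusable idiom. Reduced to its essentials, with hypothetical names:]

    static unsigned long op_in_progress;   /* bit 0: "someone owns the op" */

    static void run_once_across_cpus(void)
    {
            if (test_and_set_bit(0, &op_in_progress))
                    return;        /* another caller is already doing it */

            /* ... send IPIs, then poll with a bounded mdelay() loop until
             *     every target CPU has cleared itself from the mask ... */

            clear_bit(0, &op_in_progress);
            smp_mb__after_clear_bit();     /* make the release visible */
    }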
index 8a9c4cb50a93f3309588af662d0863f86daf3aff..9d32a253aac9253053249b8fc86d7541192b16a5 100644
@@ -7,6 +7,7 @@ obj-y                           := dma-mapping.o extable.o fault.o init.o \
 
 obj-$(CONFIG_MMU)              += fault-armv.o flush.o idmap.o ioremap.o \
                                   mmap.o pgd.o mmu.o vmregion.o
+obj-$(CONFIG_DEBUG_RODATA)     += rodata.o
 
 ifneq ($(CONFIG_MMU),y)
 obj-y                          += nommu.o
index d8fd4d4bd3d45ecdc66ad2c74885795df9681ea7..7a3d3d8d98d7fa384e998d271b1ccbbf5b3de256 100644
@@ -270,6 +270,11 @@ v6_dma_clean_range:
  *     - end     - virtual end address of region
  */
 ENTRY(v6_dma_flush_range)
+#ifdef CONFIG_CACHE_FLUSH_RANGE_LIMIT
+       sub     r2, r1, r0
+       cmp     r2, #CONFIG_CACHE_FLUSH_RANGE_LIMIT
+       bhi     v6_dma_flush_dcache_all
+#endif
 #ifdef CONFIG_DMA_CACHE_RWFO
        ldrb    r2, [r0]                @ read for ownership
        strb    r2, [r0]                @ write for ownership
@@ -292,6 +297,18 @@ ENTRY(v6_dma_flush_range)
        mcr     p15, 0, r0, c7, c10, 4          @ drain write buffer
        mov     pc, lr
 
+#ifdef CONFIG_CACHE_FLUSH_RANGE_LIMIT
+v6_dma_flush_dcache_all:
+       mov     r0, #0
+#ifdef HARVARD_CACHE
+       mcr     p15, 0, r0, c7, c14, 0          @ D cache clean+invalidate
+#else
+       mcr     p15, 0, r0, c7, c15, 0          @ Cache clean+invalidate
+#endif
+       mcr     p15, 0, r0, c7, c10, 4          @ drain write buffer
+       mov     pc, lr
+#endif
+
 /*
  *     dma_map_area(start, size, dir)
  *     - start - kernel virtual start address
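[Expressed in C rather than the assembly above, the CONFIG_CACHE_FLUSH_RANGE_LIMIT guard amounts to the following sketch; v6_flush_dcache_all_c() is a hypothetical stand-in for the label the hunk adds.]

    void v6_dma_flush_range_sketch(unsigned long start, unsigned long end)
    {
    #ifdef CONFIG_CACHE_FLUSH_RANGE_LIMIT
            /* Past a configured size, one full D-cache clean+invalidate
             * is cheaper than walking the range line by line. */
            if (end - start > CONFIG_CACHE_FLUSH_RANGE_LIMIT) {
                    v6_flush_dcache_all_c();       /* hypothetical wrapper */
                    return;
            }
    #endif
            /* ... per-line clean+invalidate loop, as before ... */
    }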
index 0c10c20bd241dd69c0c6e8d0ae778d6276d5aa53..11b4b6d4f41c52106a49a88527b88fc04f37e5a4 100644
@@ -555,11 +555,25 @@ static void __init *early_alloc(unsigned long sz)
        return early_alloc_aligned(sz, sz);
 }
 
-static pte_t * __init early_pte_alloc(pmd_t *pmd, unsigned long addr, unsigned long prot)
+static pte_t * __init early_pte_alloc(pmd_t *pmd)
+{
+       if (pmd_none(*pmd) || pmd_bad(*pmd))
+               return early_alloc(PTE_HWTABLE_OFF + PTE_HWTABLE_SIZE);
+       return pmd_page_vaddr(*pmd);
+}
+
+static void __init early_pte_install(pmd_t *pmd, pte_t *pte, unsigned long prot)
+{
+       __pmd_populate(pmd, __pa(pte), prot);
+       BUG_ON(pmd_bad(*pmd));
+}
+
+static pte_t * __init early_pte_alloc_and_install(pmd_t *pmd,
+       unsigned long addr, unsigned long prot)
 {
        if (pmd_none(*pmd)) {
-               pte_t *pte = early_alloc(PTE_HWTABLE_OFF + PTE_HWTABLE_SIZE);
-               __pmd_populate(pmd, __pa(pte), prot);
+               pte_t *pte = early_pte_alloc(pmd);
+               early_pte_install(pmd, pte, prot);
        }
        BUG_ON(pmd_bad(*pmd));
        return pte_offset_kernel(pmd, addr);
@@ -569,11 +583,17 @@ static void __init alloc_init_pte(pmd_t *pmd, unsigned long addr,
                                  unsigned long end, unsigned long pfn,
                                  const struct mem_type *type)
 {
-       pte_t *pte = early_pte_alloc(pmd, addr, type->prot_l1);
+       pte_t *start_pte = early_pte_alloc(pmd);
+       pte_t *pte = start_pte + pte_index(addr);
+
+       /* If replacing a section mapping, the whole section must be replaced */
+       BUG_ON(pmd_bad(*pmd) && ((addr | end) & ~PMD_MASK));
+
        do {
                set_pte_ext(pte, pfn_pte(pfn, __pgprot(type->prot_pte)), 0);
                pfn++;
        } while (pte++, addr += PAGE_SIZE, addr != end);
+       early_pte_install(pmd, start_pte, type->prot_l1);
 }
 
 static void __init map_init_section(pmd_t *pmd, unsigned long addr,
@@ -633,7 +653,8 @@ static void __init alloc_init_pmd(pud_t *pud, unsigned long addr,
 }
 
 static void __init alloc_init_pud(pgd_t *pgd, unsigned long addr,
-       unsigned long end, unsigned long phys, const struct mem_type *type)
+       unsigned long end, unsigned long phys, const struct mem_type *type,
+       bool force_pages)
 {
        pud_t *pud = pud_offset(pgd, addr);
        unsigned long next;
@@ -714,7 +735,7 @@ static void __init create_36bit_mapping(struct map_desc *md,
  * offsets, and we take full advantage of sections and
  * supersections.
  */
-static void __init create_mapping(struct map_desc *md)
+static void __init create_mapping(struct map_desc *md, bool force_pages)
 {
        unsigned long addr, length, end;
        phys_addr_t phys;
@@ -764,7 +785,7 @@ static void __init create_mapping(struct map_desc *md)
        do {
                unsigned long next = pgd_addr_end(addr, end);
 
-               alloc_init_pud(pgd, addr, next, phys, type);
+               alloc_init_pud(pgd, addr, next, phys, type, force_pages);
 
                phys += next - addr;
                addr = next;
@@ -785,7 +806,7 @@ void __init iotable_init(struct map_desc *io_desc, int nr)
        vm = early_alloc_aligned(sizeof(*vm) * nr, __alignof__(*vm));
 
        for (md = io_desc; nr; md++, nr--) {
-               create_mapping(md);
+               create_mapping(md, false);
                vm->addr = (void *)(md->virtual & PAGE_MASK);
                vm->size = PAGE_ALIGN(md->length + (md->virtual & ~PAGE_MASK));
                vm->phys_addr = __pfn_to_phys(md->pfn);
@@ -909,7 +930,7 @@ void __init debug_ll_io_init(void)
        map.virtual &= PAGE_MASK;
        map.length = PAGE_SIZE;
        map.type = MT_DEVICE;
-       create_mapping(&map);
+       create_mapping(&map, false);
 }
 #endif
 
@@ -954,6 +975,28 @@ void __init sanity_check_meminfo(void)
                struct membank *bank = &meminfo.bank[j];
                *bank = meminfo.bank[i];
 
+#ifdef CONFIG_SPARSEMEM
+               if (pfn_to_section_nr(bank_pfn_start(bank)) !=
+                   pfn_to_section_nr(bank_pfn_end(bank) - 1)) {
+                       phys_addr_t sz;
+                       unsigned long start_pfn = bank_pfn_start(bank);
+                       unsigned long end_pfn = SECTION_ALIGN_UP(start_pfn + 1);
+                       sz = ((phys_addr_t)(end_pfn - start_pfn) << PAGE_SHIFT);
+
+                       if (meminfo.nr_banks >= NR_BANKS) {
+                               pr_crit("NR_BANKS too low, ignoring %lld bytes of memory\n",
+                                       (unsigned long long)(bank->size - sz));
+                       } else {
+                               memmove(bank + 1, bank,
+                                       (meminfo.nr_banks - i) * sizeof(*bank));
+                               meminfo.nr_banks++;
+                               bank[1].size -= sz;
+                               bank[1].start = __pfn_to_phys(end_pfn);
+                       }
+                       bank->size = sz;
+               }
+#endif
+
                if (bank->start > ULONG_MAX)
                        highmem = 1;
 
@@ -1151,7 +1194,7 @@ static void __init devicemaps_init(struct machine_desc *mdesc)
        map.virtual = MODULES_VADDR;
        map.length = ((unsigned long)_etext - map.virtual + ~SECTION_MASK) & SECTION_MASK;
        map.type = MT_ROM;
-       create_mapping(&map);
+       create_mapping(&map, false);
 #endif
 
        /*
@@ -1162,14 +1205,14 @@ static void __init devicemaps_init(struct machine_desc *mdesc)
        map.virtual = FLUSH_BASE;
        map.length = SZ_1M;
        map.type = MT_CACHECLEAN;
-       create_mapping(&map);
+       create_mapping(&map, false);
 #endif
 #ifdef FLUSH_BASE_MINICACHE
        map.pfn = __phys_to_pfn(FLUSH_BASE_PHYS + SZ_1M);
        map.virtual = FLUSH_BASE_MINICACHE;
        map.length = SZ_1M;
        map.type = MT_MINICLEAN;
-       create_mapping(&map);
+       create_mapping(&map, false);
 #endif
 
        /*
@@ -1181,12 +1224,12 @@ static void __init devicemaps_init(struct machine_desc *mdesc)
        map.virtual = 0xffff0000;
        map.length = PAGE_SIZE;
        map.type = MT_HIGH_VECTORS;
-       create_mapping(&map);
+       create_mapping(&map, false);
 
        if (!vectors_high()) {
                map.virtual = 0;
                map.type = MT_LOW_VECTORS;
-               create_mapping(&map);
+               create_mapping(&map, false);
        }
 
        /*
@@ -1212,20 +1255,23 @@ static void __init devicemaps_init(struct machine_desc *mdesc)
 static void __init kmap_init(void)
 {
 #ifdef CONFIG_HIGHMEM
-       pkmap_page_table = early_pte_alloc(pmd_off_k(PKMAP_BASE),
+       pkmap_page_table = early_pte_alloc_and_install(pmd_off_k(PKMAP_BASE),
                PKMAP_BASE, _PAGE_KERNEL_TABLE);
 #endif
 }
 
+
 static void __init map_lowmem(void)
 {
        struct memblock_region *reg;
+       phys_addr_t start;
+       phys_addr_t end;
+       struct map_desc map;
 
        /* Map all the lowmem memory banks. */
        for_each_memblock(memory, reg) {
-               phys_addr_t start = reg->base;
-               phys_addr_t end = start + reg->size;
-               struct map_desc map;
+               start = reg->base;
+               end = start + reg->size;
 
                if (end > arm_lowmem_limit)
                        end = arm_lowmem_limit;
@@ -1237,8 +1283,20 @@ static void __init map_lowmem(void)
                map.length = end - start;
                map.type = MT_MEMORY;
 
-               create_mapping(&map);
+               create_mapping(&map, false);
        }
+
+#ifdef CONFIG_DEBUG_RODATA
+       start = __pa(_stext) & PMD_MASK;
+       end = ALIGN(__pa(__end_rodata), PMD_SIZE);
+
+       map.pfn = __phys_to_pfn(start);
+       map.virtual = __phys_to_virt(start);
+       map.length = end - start;
+       map.type = MT_MEMORY;
+
+       create_mapping(&map, true);
+#endif
 }
 
 /*
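[The point of splitting early_pte_alloc() into separate allocate and install steps shows up most clearly in alloc_init_pte() above. In condensed form (fill_ptes() is a hypothetical helper standing in for the set_pte_ext() loop), the ordering is:]

    pte_t *pte = early_pte_alloc(pmd);       /* reuse the table if one exists */
    fill_ptes(pte + pte_index(addr), ...);   /* write all the new PTEs first  */
    early_pte_install(pmd, pte, prot);       /* only then repoint the PMD, so
                                              * a 1MB section mapping is
                                              * replaced in a single step     */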
diff --git a/arch/arm/mm/rodata.c b/arch/arm/mm/rodata.c
new file mode 100644
index 0000000..9a8eb84
--- /dev/null
@@ -0,0 +1,159 @@
+/*
+ *  linux/arch/arm/mm/rodata.c
+ *
+ *  Copyright (C) 2011 Google, Inc.
+ *
+ *  Author: Colin Cross <ccross@android.com>
+ *
+ *  Based on x86 implementation in arch/x86/mm/init_32.c
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+
+#include <asm/cache.h>
+#include <asm/pgtable.h>
+#include <asm/rodata.h>
+#include <asm/sections.h>
+#include <asm/tlbflush.h>
+
+#include "mm.h"
+
+static int kernel_set_to_readonly __read_mostly;
+
+#ifdef CONFIG_DEBUG_RODATA_TEST
+static const int rodata_test_data = 0xC3;
+
+static noinline void rodata_test(void)
+{
+       int result;
+
+       pr_info("%s: attempting to write to read-only section:\n", __func__);
+
+       if (*(volatile int *)&rodata_test_data != 0xC3) {
+               pr_err("read only data changed before test\n");
+               return;
+       }
+
+       /*
+        * Attempt to write to rodata_test_data, trapping the expected
+        * data abort.  If the trap executed, result will be 1.  If it didn't,
+        * result will be 0xFF.
+        */
+       asm volatile(
+               "0:     str     %[zero], [%[rodata_test_data]]\n"
+               "       mov     %[result], #0xFF\n"
+               "       b       2f\n"
+               "1:     mov     %[result], #1\n"
+               "2:\n"
+
+               /* Exception fixup - if store at label 0 faults, jumps to 1 */
+               ".pushsection __ex_table, \"a\"\n"
+               "       .long   0b, 1b\n"
+               ".popsection\n"
+
+               : [result] "=r" (result)
+               : [rodata_test_data] "r" (&rodata_test_data), [zero] "r" (0)
+               : "memory"
+       );
+
+       if (result == 1)
+               pr_info("write to read-only section trapped, success\n");
+       else
+               pr_err("write to read-only section NOT trapped, test failed\n");
+
+       if (*(volatile int *)&rodata_test_data != 0xC3)
+               pr_err("read only data changed during write\n");
+}
+#else
+static inline void rodata_test(void) { }
+#endif
+
+static int set_page_attributes(unsigned long virt, int numpages,
+       pte_t (*f)(pte_t))
+{
+       pmd_t *pmd;
+       pte_t *pte;
+       unsigned long start = virt;
+       unsigned long end = virt + (numpages << PAGE_SHIFT);
+       unsigned long pmd_end;
+
+       while (virt < end) {
+               pmd = pmd_off_k(virt);
+               pmd_end = min(ALIGN(virt + 1, PMD_SIZE), end);
+
+               if ((pmd_val(*pmd) & PMD_TYPE_MASK) != PMD_TYPE_TABLE) {
+                       pr_err("%s: pmd %p=%08lx for %08lx not page table\n",
+                               __func__, pmd, pmd_val(*pmd), virt);
+                       virt = pmd_end;
+                       continue;
+               }
+
+               while (virt < pmd_end) {
+                       pte = pte_offset_kernel(pmd, virt);
+                       set_pte_ext(pte, f(*pte), 0);
+                       virt += PAGE_SIZE;
+               }
+       }
+
+       flush_tlb_kernel_range(start, end);
+
+       return 0;
+}
+
+int set_memory_ro(unsigned long virt, int numpages)
+{
+       return set_page_attributes(virt, numpages, pte_wrprotect);
+}
+EXPORT_SYMBOL(set_memory_ro);
+
+int set_memory_rw(unsigned long virt, int numpages)
+{
+       return set_page_attributes(virt, numpages, pte_mkwrite);
+}
+EXPORT_SYMBOL(set_memory_rw);
+
+void set_kernel_text_rw(void)
+{
+       unsigned long start = PAGE_ALIGN((unsigned long)_text);
+       unsigned long size = PAGE_ALIGN((unsigned long)__end_rodata) - start;
+
+       if (!kernel_set_to_readonly)
+               return;
+
+       pr_debug("Set kernel text: %lx - %lx to read-write\n",
+                start, start + size);
+
+       set_memory_rw(start, size >> PAGE_SHIFT);
+}
+
+void set_kernel_text_ro(void)
+{
+       unsigned long start = PAGE_ALIGN((unsigned long)_text);
+       unsigned long size = PAGE_ALIGN((unsigned long)__end_rodata) - start;
+
+       if (!kernel_set_to_readonly)
+               return;
+
+       pr_info_once("Write protecting the kernel text section %lx - %lx\n",
+               start, start + size);
+
+       pr_debug("Set kernel text: %lx - %lx to read only\n",
+                start, start + size);
+
+       set_memory_ro(start, size >> PAGE_SHIFT);
+}
+
+void mark_rodata_ro(void)
+{
+       kernel_set_to_readonly = 1;
+
+       set_kernel_text_ro();
+
+       rodata_test();
+}
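[A hypothetical module using the set_memory_ro()/set_memory_rw() interface this file exports. Whether a region can be repermissioned this way depends on it being page-mapped; section-mapped regions make set_page_attributes() bail with the pr_err above.]

    #include <linux/module.h>
    #include <linux/vmalloc.h>
    #include <asm/rodata.h>

    static u32 *table;

    static int __init rodemo_init(void)
    {
            table = vmalloc(PAGE_SIZE);     /* vmalloc memory is page-mapped */
            if (!table)
                    return -ENOMEM;
            table[0] = 0xC3;
            set_memory_ro((unsigned long)table, 1);  /* write-protect 1 page */
            return 0;
    }

    static void __exit rodemo_exit(void)
    {
            set_memory_rw((unsigned long)table, 1);  /* restore before free */
            vfree(table);
    }

    module_init(rodemo_init);
    module_exit(rodemo_exit);
    MODULE_LICENSE("GPL");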
index c5d1785373ed38c25f1096467d103cd6440c1b66..02bab09707f28cc7dd2a10bc1a3b9af35f8e1aef 100644
@@ -1,13 +1,6 @@
 #ifndef _ASM_X86_IDLE_H
 #define _ASM_X86_IDLE_H
 
-#define IDLE_START 1
-#define IDLE_END 2
-
-struct notifier_block;
-void idle_notifier_register(struct notifier_block *n);
-void idle_notifier_unregister(struct notifier_block *n);
-
 #ifdef CONFIG_X86_64
 void enter_idle(void);
 void exit_idle(void);
index 2ed787f15bf0cf397d6515c429d50544e04e9e34..5c38b7a70f813842d0579bd2d9f65ccb9c2a7118 100644
@@ -40,19 +40,6 @@ DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss) = INIT_TSS;
 
 #ifdef CONFIG_X86_64
 static DEFINE_PER_CPU(unsigned char, is_idle);
-static ATOMIC_NOTIFIER_HEAD(idle_notifier);
-
-void idle_notifier_register(struct notifier_block *n)
-{
-       atomic_notifier_chain_register(&idle_notifier, n);
-}
-EXPORT_SYMBOL_GPL(idle_notifier_register);
-
-void idle_notifier_unregister(struct notifier_block *n)
-{
-       atomic_notifier_chain_unregister(&idle_notifier, n);
-}
-EXPORT_SYMBOL_GPL(idle_notifier_unregister);
 #endif
 
 struct kmem_cache *task_xstate_cachep;
@@ -287,14 +274,14 @@ static inline void play_dead(void)
 void enter_idle(void)
 {
        this_cpu_write(is_idle, 1);
-       atomic_notifier_call_chain(&idle_notifier, IDLE_START, NULL);
+       idle_notifier_call_chain(IDLE_START);
 }
 
 static void __exit_idle(void)
 {
        if (x86_test_and_clear_bit_percpu(0, is_idle) == 0)
                return;
-       atomic_notifier_call_chain(&idle_notifier, IDLE_END, NULL);
+       idle_notifier_call_chain(IDLE_END);
 }
 
 /* Called from interrupts to signify idle end */
index 7dcfdd84efcf4221cff530cbead9a9c5c7da64f5..c8b520eea3667927838de9a230562e642668fe35 100644
@@ -1106,6 +1106,22 @@ static void disk_release(struct device *dev)
                blk_put_queue(disk->queue);
        kfree(disk);
 }
+
+static int disk_uevent(struct device *dev, struct kobj_uevent_env *env)
+{
+       struct gendisk *disk = dev_to_disk(dev);
+       struct disk_part_iter piter;
+       struct hd_struct *part;
+       int cnt = 0;
+
+       disk_part_iter_init(&piter, disk, 0);
+       while ((part = disk_part_iter_next(&piter)))
+               cnt++;
+       disk_part_iter_exit(&piter);
+       add_uevent_var(env, "NPARTS=%u", cnt);
+       return 0;
+}
+
 struct class block_class = {
        .name           = "block",
 };
@@ -1124,6 +1140,7 @@ static struct device_type disk_type = {
        .groups         = disk_attr_groups,
        .release        = disk_release,
        .devnode        = block_devnode,
+       .uevent         = disk_uevent,
 };
 
 #ifdef CONFIG_PROC_FS
index 1cb4deca1324a0afd1d8239e6d564d5d3c8be1ec..7fad3f52b1ab12e25c6999e629e3ab14654f7655 100644
@@ -216,10 +216,21 @@ static void part_release(struct device *dev)
        kfree(p);
 }
 
+static int part_uevent(struct device *dev, struct kobj_uevent_env *env)
+{
+       struct hd_struct *part = dev_to_part(dev);
+
+       add_uevent_var(env, "PARTN=%u", part->partno);
+       if (part->info && part->info->volname[0])
+               add_uevent_var(env, "PARTNAME=%s", part->info->volname);
+       return 0;
+}
+
 struct device_type part_type = {
        .name           = "partition",
        .groups         = part_attr_groups,
        .release        = part_release,
+       .uevent         = part_uevent,
 };
 
 static void delete_partition_rcu_cb(struct rcu_head *head)
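[Both uevent hunks above follow the same device_type pattern. A minimal sketch for a hypothetical device type: add_uevent_var() appends KEY=value pairs that become visible in the sysfs uevent file and to udev rules.]

    static int mydev_uevent(struct device *dev, struct kobj_uevent_env *env)
    {
            struct mydev *md = dev_get_drvdata(dev);   /* hypothetical type */

            add_uevent_var(env, "MYDEV_MODE=%u", md->mode);
            return 0;
    }

    static struct device_type mydev_type = {
            .name   = "mydev",
            .uevent = mydev_uevent,
    };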
index 2b4e89ba15adafe1f6105c2dd06d5cfe0beaa8d1..945e5b809f1184c6a4ae798cb3d0532e48bfcd73 100644
@@ -98,6 +98,8 @@ source "drivers/memstick/Kconfig"
 
 source "drivers/leds/Kconfig"
 
+source "drivers/switch/Kconfig"
+
 source "drivers/accessibility/Kconfig"
 
 source "drivers/infiniband/Kconfig"
index 7e14f8e2873670a33b9f49c62f6d3dfad644899e..29aa64a8d92592d0f4f1374ca8ec748c182f8f5e 100644
@@ -108,6 +108,7 @@ obj-$(CONFIG_CPU_IDLE)              += cpuidle/
 obj-y                          += mmc/
 obj-$(CONFIG_MEMSTICK)         += memstick/
 obj-y                          += leds/
+obj-$(CONFIG_SWITCH)           += switch/
 obj-$(CONFIG_INFINIBAND)       += infiniband/
 obj-$(CONFIG_SGI_SN)           += sn/
 obj-y                          += firmware/
index c8b453939da21eedcf6c8c1728051fef98900897..2f5f34c9d6306ba84da9e55153ae02a61f737e06 100644
@@ -281,4 +281,30 @@ config CMA_AREAS
 
 endif
 
+config SYNC
+       bool "Synchronization framework"
+       default n
+       select ANON_INODES
+       help
+         This option enables the framework for synchronization between multiple
+         drivers.  Sync implementations can take advantage of hardware
+         synchronization built into devices like GPUs.
+
+config SW_SYNC
+       bool "Software synchronization objects"
+       default n
+       depends on SYNC
+       help
+         A sync object driver that uses a 32-bit counter to coordinate
+         synchronization.  Useful when there is no hardware primitive backing
+         the synchronization.
+
+config SW_SYNC_USER
+       bool "Userspace API for SW_SYNC"
+       default n
+       depends on SW_SYNC
+       help
+         Provides a user space API to the sw sync object.
+         *WARNING* improper use of this can result in deadlocking kernel
+         drivers from userspace.
 endmenu
index 5aa2d703d19fac08073c54e27530ef8d5bc5b45d..1128a612e24d532a2a8066c607992f360231e6c9 100644
@@ -22,5 +22,8 @@ obj-$(CONFIG_SYS_HYPERVISOR) += hypervisor.o
 obj-$(CONFIG_REGMAP)   += regmap/
 obj-$(CONFIG_SOC_BUS) += soc.o
 
+obj-$(CONFIG_SYNC)     += sync.o
+obj-$(CONFIG_SW_SYNC)  += sw_sync.o
+
 ccflags-$(CONFIG_DEBUG_DRIVER) := -DDEBUG
 
index 2b7f77d3fcb0d518d2a1f4e041f29b61d6e4521b..08ca6df6cd801051635c6cc85c2c01242d5ff8f1 100644
@@ -29,6 +29,8 @@
 #include <linux/async.h>
 #include <linux/suspend.h>
 #include <linux/cpuidle.h>
+#include <linux/timer.h>
+
 #include "../base.h"
 #include "power.h"
 
@@ -54,6 +56,12 @@ struct suspend_stats suspend_stats;
 static DEFINE_MUTEX(dpm_list_mtx);
 static pm_message_t pm_transition;
 
+struct dpm_watchdog {
+       struct device           *dev;
+       struct task_struct      *tsk;
+       struct timer_list       timer;
+};
+
 static int async_error;
 
 /**
@@ -386,6 +394,56 @@ static int dpm_run_callback(pm_callback_t cb, struct device *dev,
        return error;
 }
 
+/**
+ * dpm_wd_handler - Driver suspend / resume watchdog handler.
+ * @data: Address of the struct dpm_watchdog for the stuck device.
+ *
+ * Called when a driver has timed out suspending or resuming.
+ * There's not much we can do here to recover, so BUG() out for
+ * a crash-dump.
+ */
+static void dpm_wd_handler(unsigned long data)
+{
+       struct dpm_watchdog *wd = (void *)data;
+       struct device *dev      = wd->dev;
+       struct task_struct *tsk = wd->tsk;
+
+       dev_emerg(dev, "**** DPM device timeout ****\n");
+       show_stack(tsk, NULL);
+
+       BUG();
+}
+
+/**
+ * dpm_wd_set - Enable pm watchdog for given device.
+ * @wd: Watchdog. Must be allocated on the stack.
+ * @dev: Device to handle.
+ */
+static void dpm_wd_set(struct dpm_watchdog *wd, struct device *dev)
+{
+       struct timer_list *timer = &wd->timer;
+
+       wd->dev = dev;
+       wd->tsk = get_current();
+
+       init_timer_on_stack(timer);
+       timer->expires = jiffies + HZ * 12;
+       timer->function = dpm_wd_handler;
+       timer->data = (unsigned long)wd;
+       add_timer(timer);
+}
+
+/**
+ * dpm_wd_clear - Disable pm watchdog.
+ * @wd: Watchdog to disable.
+ */
+static void dpm_wd_clear(struct dpm_watchdog *wd)
+{
+       struct timer_list *timer = &wd->timer;
+
+       del_timer_sync(timer);
+       destroy_timer_on_stack(timer);
+}
+
 /*------------------------- Resume routines -------------------------*/
 
 /**
@@ -572,6 +630,7 @@ static int device_resume(struct device *dev, pm_message_t state, bool async)
        pm_callback_t callback = NULL;
        char *info = NULL;
        int error = 0;
+       struct dpm_watchdog wd;
 
        TRACE_DEVICE(dev);
        TRACE_RESUME(0);
@@ -587,6 +646,7 @@ static int device_resume(struct device *dev, pm_message_t state, bool async)
         * a resumed device, even if the device hasn't been completed yet.
         */
        dev->power.is_prepared = false;
+       dpm_wd_set(&wd, dev);
 
        if (!dev->power.is_suspended)
                goto Unlock;
@@ -638,6 +698,7 @@ static int device_resume(struct device *dev, pm_message_t state, bool async)
 
  Unlock:
        device_unlock(dev);
+       dpm_wd_clear(&wd);
 
  Complete:
        complete_all(&dev->power.completion);
@@ -1055,6 +1116,7 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)
        pm_callback_t callback = NULL;
        char *info = NULL;
        int error = 0;
+       struct dpm_watchdog wd;
 
        dpm_wait_for_children(dev, async);
 
@@ -1077,6 +1139,8 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)
 
        if (dev->power.syscore)
                goto Complete;
+
+       dpm_wd_set(&wd, dev);
 
        device_lock(dev);
 
@@ -1133,6 +1197,8 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)
 
        device_unlock(dev);
 
+       dpm_wd_clear(&wd);
+
  Complete:
        complete_all(&dev->power.completion);
        if (error)
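[The watchdog added above is a self-contained on-stack-timer idiom; its use around a potentially wedging callback reduces to the following, using the names from the patch itself.]

    struct dpm_watchdog wd;

    dpm_wd_set(&wd, dev);    /* arm: a 12s timer that BUG()s with the stack
                              * of the task stuck in the callback */
    /* ... invoke the driver's suspend or resume callback ... */
    dpm_wd_clear(&wd);       /* del_timer_sync() before 'wd' leaves scope */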
diff --git a/drivers/base/sw_sync.c b/drivers/base/sw_sync.c
new file mode 100644
index 0000000..b4d8529
--- /dev/null
@@ -0,0 +1,262 @@
+/*
+ * drivers/base/sw_sync.c
+ *
+ * Copyright (C) 2012 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/export.h>
+#include <linux/file.h>
+#include <linux/fs.h>
+#include <linux/miscdevice.h>
+#include <linux/module.h>
+#include <linux/sw_sync.h>
+#include <linux/syscalls.h>
+#include <linux/uaccess.h>
+
+static int sw_sync_cmp(u32 a, u32 b)
+{
+       if (a == b)
+               return 0;
+
+       return ((s32)a - (s32)b) < 0 ? -1 : 1;
+}
+
+struct sync_pt *sw_sync_pt_create(struct sw_sync_timeline *obj, u32 value)
+{
+       struct sw_sync_pt *pt;
+
+       pt = (struct sw_sync_pt *)
+               sync_pt_create(&obj->obj, sizeof(struct sw_sync_pt));
+
+       pt->value = value;
+
+       return (struct sync_pt *)pt;
+}
+EXPORT_SYMBOL(sw_sync_pt_create);
+
+static struct sync_pt *sw_sync_pt_dup(struct sync_pt *sync_pt)
+{
+       struct sw_sync_pt *pt = (struct sw_sync_pt *) sync_pt;
+       struct sw_sync_timeline *obj =
+               (struct sw_sync_timeline *)sync_pt->parent;
+
+       return (struct sync_pt *) sw_sync_pt_create(obj, pt->value);
+}
+
+static int sw_sync_pt_has_signaled(struct sync_pt *sync_pt)
+{
+       struct sw_sync_pt *pt = (struct sw_sync_pt *)sync_pt;
+       struct sw_sync_timeline *obj =
+               (struct sw_sync_timeline *)sync_pt->parent;
+
+       return sw_sync_cmp(obj->value, pt->value) >= 0;
+}
+
+static int sw_sync_pt_compare(struct sync_pt *a, struct sync_pt *b)
+{
+       struct sw_sync_pt *pt_a = (struct sw_sync_pt *)a;
+       struct sw_sync_pt *pt_b = (struct sw_sync_pt *)b;
+
+       return sw_sync_cmp(pt_a->value, pt_b->value);
+}
+
+static int sw_sync_fill_driver_data(struct sync_pt *sync_pt,
+                                   void *data, int size)
+{
+       struct sw_sync_pt *pt = (struct sw_sync_pt *)sync_pt;
+
+       if (size < sizeof(pt->value))
+               return -ENOMEM;
+
+       memcpy(data, &pt->value, sizeof(pt->value));
+
+       return sizeof(pt->value);
+}
+
+static void sw_sync_timeline_value_str(struct sync_timeline *sync_timeline,
+                                      char *str, int size)
+{
+       struct sw_sync_timeline *timeline =
+               (struct sw_sync_timeline *)sync_timeline;
+       snprintf(str, size, "%d", timeline->value);
+}
+
+static void sw_sync_pt_value_str(struct sync_pt *sync_pt,
+                                      char *str, int size)
+{
+       struct sw_sync_pt *pt = (struct sw_sync_pt *)sync_pt;
+       snprintf(str, size, "%d", pt->value);
+}
+
+struct sync_timeline_ops sw_sync_timeline_ops = {
+       .driver_name = "sw_sync",
+       .dup = sw_sync_pt_dup,
+       .has_signaled = sw_sync_pt_has_signaled,
+       .compare = sw_sync_pt_compare,
+       .fill_driver_data = sw_sync_fill_driver_data,
+       .timeline_value_str = sw_sync_timeline_value_str,
+       .pt_value_str = sw_sync_pt_value_str,
+};
+
+struct sw_sync_timeline *sw_sync_timeline_create(const char *name)
+{
+       struct sw_sync_timeline *obj = (struct sw_sync_timeline *)
+               sync_timeline_create(&sw_sync_timeline_ops,
+                                    sizeof(struct sw_sync_timeline),
+                                    name);
+
+       return obj;
+}
+EXPORT_SYMBOL(sw_sync_timeline_create);
+
+void sw_sync_timeline_inc(struct sw_sync_timeline *obj, u32 inc)
+{
+       obj->value += inc;
+
+       sync_timeline_signal(&obj->obj);
+}
+EXPORT_SYMBOL(sw_sync_timeline_inc);
+
+#ifdef CONFIG_SW_SYNC_USER
+/* *WARNING*
+ *
+ * improper use of this can result in deadlocking kernel drivers from userspace.
+ */
+
+/* opening sw_sync create a new sync obj */
+int sw_sync_open(struct inode *inode, struct file *file)
+{
+       struct sw_sync_timeline *obj;
+       char task_comm[TASK_COMM_LEN];
+
+       get_task_comm(task_comm, current);
+
+       obj = sw_sync_timeline_create(task_comm);
+       if (obj == NULL)
+               return -ENOMEM;
+
+       file->private_data = obj;
+
+       return 0;
+}
+
+int sw_sync_release(struct inode *inode, struct file *file)
+{
+       struct sw_sync_timeline *obj = file->private_data;
+       sync_timeline_destroy(&obj->obj);
+       return 0;
+}
+
+long sw_sync_ioctl_create_fence(struct sw_sync_timeline *obj, unsigned long arg)
+{
+       int fd = get_unused_fd();
+       int err;
+       struct sync_pt *pt;
+       struct sync_fence *fence;
+       struct sw_sync_create_fence_data data;
+
+       if (fd < 0)
+               return fd;
+
+       if (copy_from_user(&data, (void __user *)arg, sizeof(data))) {
+               err = -EFAULT;
+               goto err;
+       }
+
+       pt = sw_sync_pt_create(obj, data.value);
+       if (pt == NULL) {
+               err = -ENOMEM;
+               goto err;
+       }
+
+       data.name[sizeof(data.name) - 1] = '\0';
+       fence = sync_fence_create(data.name, pt);
+       if (fence == NULL) {
+               sync_pt_free(pt);
+               err = -ENOMEM;
+               goto err;
+       }
+
+       data.fence = fd;
+       if (copy_to_user((void __user *)arg, &data, sizeof(data))) {
+               sync_fence_put(fence);
+               err = -EFAULT;
+               goto err;
+       }
+
+       sync_fence_install(fence, fd);
+
+       return 0;
+
+err:
+       put_unused_fd(fd);
+       return err;
+}
+
+long sw_sync_ioctl_inc(struct sw_sync_timeline *obj, unsigned long arg)
+{
+       u32 value;
+
+       if (copy_from_user(&value, (void __user *)arg, sizeof(value)))
+               return -EFAULT;
+
+       sw_sync_timeline_inc(obj, value);
+
+       return 0;
+}
+
+long sw_sync_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+       struct sw_sync_timeline *obj = file->private_data;
+
+       switch (cmd) {
+       case SW_SYNC_IOC_CREATE_FENCE:
+               return sw_sync_ioctl_create_fence(obj, arg);
+
+       case SW_SYNC_IOC_INC:
+               return sw_sync_ioctl_inc(obj, arg);
+
+       default:
+               return -ENOTTY;
+       }
+}
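+
+/*
+ * Editor's sketch (illustrative, not part of the original patch): a
+ * minimal userspace sequence exercising the two ioctls above, assuming
+ * the sw_sync uapi definitions (SW_SYNC_IOC_* and struct
+ * sw_sync_create_fence_data) are in scope:
+ *
+ *   int tl = open("/dev/sw_sync", O_RDWR);   // one timeline per open()
+ *   struct sw_sync_create_fence_data d = { .value = 1, .name = "demo" };
+ *   ioctl(tl, SW_SYNC_IOC_CREATE_FENCE, &d); // d.fence is a fence fd
+ *   __u32 inc = 1;
+ *   ioctl(tl, SW_SYNC_IOC_INC, &inc);        // timeline reaches 1, fence signals
+ */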
+
+static const struct file_operations sw_sync_fops = {
+       .owner = THIS_MODULE,
+       .open = sw_sync_open,
+       .release = sw_sync_release,
+       .unlocked_ioctl = sw_sync_ioctl,
+};
+
+static struct miscdevice sw_sync_dev = {
+       .minor  = MISC_DYNAMIC_MINOR,
+       .name   = "sw_sync",
+       .fops   = &sw_sync_fops,
+};
+
+int __init sw_sync_device_init(void)
+{
+       return misc_register(&sw_sync_dev);
+}
+
+void __exit sw_sync_device_remove(void)
+{
+       misc_deregister(&sw_sync_dev);
+}
+
+module_init(sw_sync_device_init);
+module_exit(sw_sync_device_remove);
+
+#endif /* CONFIG_SW_SYNC_USER */
diff --git a/drivers/base/sync.c b/drivers/base/sync.c
new file mode 100644 (file)
index 0000000..2e35996
--- /dev/null
@@ -0,0 +1,1015 @@
+/*
+ * drivers/base/sync.c
+ *
+ * Copyright (C) 2012 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/debugfs.h>
+#include <linux/export.h>
+#include <linux/file.h>
+#include <linux/fs.h>
+#include <linux/kernel.h>
+#include <linux/poll.h>
+#include <linux/sched.h>
+#include <linux/seq_file.h>
+#include <linux/slab.h>
+#include <linux/sync.h>
+#include <linux/uaccess.h>
+
+#include <linux/anon_inodes.h>
+
+#define CREATE_TRACE_POINTS
+#include <trace/events/sync.h>
+
+static void sync_fence_signal_pt(struct sync_pt *pt);
+static int _sync_pt_has_signaled(struct sync_pt *pt);
+static void sync_fence_free(struct kref *kref);
+static void sync_dump(void);
+
+static LIST_HEAD(sync_timeline_list_head);
+static DEFINE_SPINLOCK(sync_timeline_list_lock);
+
+static LIST_HEAD(sync_fence_list_head);
+static DEFINE_SPINLOCK(sync_fence_list_lock);
+
+struct sync_timeline *sync_timeline_create(const struct sync_timeline_ops *ops,
+                                          int size, const char *name)
+{
+       struct sync_timeline *obj;
+       unsigned long flags;
+
+       if (size < sizeof(struct sync_timeline))
+               return NULL;
+
+       obj = kzalloc(size, GFP_KERNEL);
+       if (obj == NULL)
+               return NULL;
+
+       kref_init(&obj->kref);
+       obj->ops = ops;
+       strlcpy(obj->name, name, sizeof(obj->name));
+
+       INIT_LIST_HEAD(&obj->child_list_head);
+       spin_lock_init(&obj->child_list_lock);
+
+       INIT_LIST_HEAD(&obj->active_list_head);
+       spin_lock_init(&obj->active_list_lock);
+
+       spin_lock_irqsave(&sync_timeline_list_lock, flags);
+       list_add_tail(&obj->sync_timeline_list, &sync_timeline_list_head);
+       spin_unlock_irqrestore(&sync_timeline_list_lock, flags);
+
+       return obj;
+}
+EXPORT_SYMBOL(sync_timeline_create);
+
+static void sync_timeline_free(struct kref *kref)
+{
+       struct sync_timeline *obj =
+               container_of(kref, struct sync_timeline, kref);
+       unsigned long flags;
+
+       if (obj->ops->release_obj)
+               obj->ops->release_obj(obj);
+
+       spin_lock_irqsave(&sync_timeline_list_lock, flags);
+       list_del(&obj->sync_timeline_list);
+       spin_unlock_irqrestore(&sync_timeline_list_lock, flags);
+
+       kfree(obj);
+}
+
+void sync_timeline_destroy(struct sync_timeline *obj)
+{
+       obj->destroyed = true;
+
+       /*
+        * If this is not the last reference, signal any children
+        * that their parent is going away.
+        */
+
+       if (!kref_put(&obj->kref, sync_timeline_free))
+               sync_timeline_signal(obj);
+}
+EXPORT_SYMBOL(sync_timeline_destroy);
+
+static void sync_timeline_add_pt(struct sync_timeline *obj, struct sync_pt *pt)
+{
+       unsigned long flags;
+
+       pt->parent = obj;
+
+       spin_lock_irqsave(&obj->child_list_lock, flags);
+       list_add_tail(&pt->child_list, &obj->child_list_head);
+       spin_unlock_irqrestore(&obj->child_list_lock, flags);
+}
+
+static void sync_timeline_remove_pt(struct sync_pt *pt)
+{
+       struct sync_timeline *obj = pt->parent;
+       unsigned long flags;
+
+       spin_lock_irqsave(&obj->active_list_lock, flags);
+       if (!list_empty(&pt->active_list))
+               list_del_init(&pt->active_list);
+       spin_unlock_irqrestore(&obj->active_list_lock, flags);
+
+       spin_lock_irqsave(&obj->child_list_lock, flags);
+       if (!list_empty(&pt->child_list))
+               list_del_init(&pt->child_list);
+       spin_unlock_irqrestore(&obj->child_list_lock, flags);
+}
+
+void sync_timeline_signal(struct sync_timeline *obj)
+{
+       unsigned long flags;
+       LIST_HEAD(signaled_pts);
+       struct list_head *pos, *n;
+
+       trace_sync_timeline(obj);
+
+       spin_lock_irqsave(&obj->active_list_lock, flags);
+
+       list_for_each_safe(pos, n, &obj->active_list_head) {
+               struct sync_pt *pt =
+                       container_of(pos, struct sync_pt, active_list);
+
+               if (_sync_pt_has_signaled(pt)) {
+                       list_del_init(pos);
+                       list_add(&pt->signaled_list, &signaled_pts);
+                       kref_get(&pt->fence->kref);
+               }
+       }
+
+       spin_unlock_irqrestore(&obj->active_list_lock, flags);
+
+       list_for_each_safe(pos, n, &signaled_pts) {
+               struct sync_pt *pt =
+                       container_of(pos, struct sync_pt, signaled_list);
+
+               list_del_init(pos);
+               sync_fence_signal_pt(pt);
+               kref_put(&pt->fence->kref, sync_fence_free);
+       }
+}
+EXPORT_SYMBOL(sync_timeline_signal);
+
+struct sync_pt *sync_pt_create(struct sync_timeline *parent, int size)
+{
+       struct sync_pt *pt;
+
+       if (size < sizeof(struct sync_pt))
+               return NULL;
+
+       pt = kzalloc(size, GFP_KERNEL);
+       if (pt == NULL)
+               return NULL;
+
+       INIT_LIST_HEAD(&pt->active_list);
+       kref_get(&parent->kref);
+       sync_timeline_add_pt(parent, pt);
+
+       return pt;
+}
+EXPORT_SYMBOL(sync_pt_create);
+
+void sync_pt_free(struct sync_pt *pt)
+{
+       if (pt->parent->ops->free_pt)
+               pt->parent->ops->free_pt(pt);
+
+       sync_timeline_remove_pt(pt);
+
+       kref_put(&pt->parent->kref, sync_timeline_free);
+
+       kfree(pt);
+}
+EXPORT_SYMBOL(sync_pt_free);
+
+/* call with pt->parent->active_list_lock held */
+static int _sync_pt_has_signaled(struct sync_pt *pt)
+{
+       int old_status = pt->status;
+
+       if (!pt->status)
+               pt->status = pt->parent->ops->has_signaled(pt);
+
+       if (!pt->status && pt->parent->destroyed)
+               pt->status = -ENOENT;
+
+       if (pt->status != old_status)
+               pt->timestamp = ktime_get();
+
+       return pt->status;
+}
+
+static struct sync_pt *sync_pt_dup(struct sync_pt *pt)
+{
+       return pt->parent->ops->dup(pt);
+}
+
+/* Adds a sync pt to the active queue.  Called when added to a fence */
+static void sync_pt_activate(struct sync_pt *pt)
+{
+       struct sync_timeline *obj = pt->parent;
+       unsigned long flags;
+       int err;
+
+       spin_lock_irqsave(&obj->active_list_lock, flags);
+
+       err = _sync_pt_has_signaled(pt);
+       if (err != 0)
+               goto out;
+
+       list_add_tail(&pt->active_list, &obj->active_list_head);
+
+out:
+       spin_unlock_irqrestore(&obj->active_list_lock, flags);
+}
+
+static int sync_fence_release(struct inode *inode, struct file *file);
+static unsigned int sync_fence_poll(struct file *file, poll_table *wait);
+static long sync_fence_ioctl(struct file *file, unsigned int cmd,
+                            unsigned long arg);
+
+static const struct file_operations sync_fence_fops = {
+       .release = sync_fence_release,
+       .poll = sync_fence_poll,
+       .unlocked_ioctl = sync_fence_ioctl,
+};
+
+static struct sync_fence *sync_fence_alloc(const char *name)
+{
+       struct sync_fence *fence;
+       unsigned long flags;
+
+       fence = kzalloc(sizeof(struct sync_fence), GFP_KERNEL);
+       if (fence == NULL)
+               return NULL;
+
+       fence->file = anon_inode_getfile("sync_fence", &sync_fence_fops,
+                                        fence, 0);
+       if (fence->file == NULL)
+               goto err;
+
+       kref_init(&fence->kref);
+       strlcpy(fence->name, name, sizeof(fence->name));
+
+       INIT_LIST_HEAD(&fence->pt_list_head);
+       INIT_LIST_HEAD(&fence->waiter_list_head);
+       spin_lock_init(&fence->waiter_list_lock);
+
+       init_waitqueue_head(&fence->wq);
+
+       spin_lock_irqsave(&sync_fence_list_lock, flags);
+       list_add_tail(&fence->sync_fence_list, &sync_fence_list_head);
+       spin_unlock_irqrestore(&sync_fence_list_lock, flags);
+
+       return fence;
+
+err:
+       kfree(fence);
+       return NULL;
+}
+
+/* TODO: implement a create which takes more than one sync_pt */
+struct sync_fence *sync_fence_create(const char *name, struct sync_pt *pt)
+{
+       struct sync_fence *fence;
+
+       if (pt->fence)
+               return NULL;
+
+       fence = sync_fence_alloc(name);
+       if (fence == NULL)
+               return NULL;
+
+       pt->fence = fence;
+       list_add(&pt->pt_list, &fence->pt_list_head);
+       sync_pt_activate(pt);
+
+       /*
+        * signal the fence in case pt had already signaled by the time
+        * sync_pt_activate(pt) was called
+        */
+       sync_fence_signal_pt(pt);
+
+       return fence;
+}
+EXPORT_SYMBOL(sync_fence_create);
+
+static int sync_fence_copy_pts(struct sync_fence *dst, struct sync_fence *src)
+{
+       struct list_head *pos;
+
+       list_for_each(pos, &src->pt_list_head) {
+               struct sync_pt *orig_pt =
+                       container_of(pos, struct sync_pt, pt_list);
+               struct sync_pt *new_pt = sync_pt_dup(orig_pt);
+
+               if (new_pt == NULL)
+                       return -ENOMEM;
+
+               new_pt->fence = dst;
+               list_add(&new_pt->pt_list, &dst->pt_list_head);
+       }
+
+       return 0;
+}
+
+static int sync_fence_merge_pts(struct sync_fence *dst, struct sync_fence *src)
+{
+       struct list_head *src_pos, *dst_pos, *n;
+
+       list_for_each(src_pos, &src->pt_list_head) {
+               struct sync_pt *src_pt =
+                       container_of(src_pos, struct sync_pt, pt_list);
+               bool collapsed = false;
+
+               list_for_each_safe(dst_pos, n, &dst->pt_list_head) {
+                       struct sync_pt *dst_pt =
+                               container_of(dst_pos, struct sync_pt, pt_list);
+                       /* collapse two sync_pts on the same timeline
+                        * to a single sync_pt that will signal at
+                        * the later of the two
+                        */
+                       if (dst_pt->parent == src_pt->parent) {
+                               if (dst_pt->parent->ops->compare(dst_pt, src_pt) == -1) {
+                                       struct sync_pt *new_pt =
+                                               sync_pt_dup(src_pt);
+                                       if (new_pt == NULL)
+                                               return -ENOMEM;
+
+                                       new_pt->fence = dst;
+                                       list_replace(&dst_pt->pt_list,
+                                                    &new_pt->pt_list);
+                                       sync_pt_free(dst_pt);
+                               }
+                               collapsed = true;
+                               break;
+                       }
+               }
+
+               if (!collapsed) {
+                       struct sync_pt *new_pt = sync_pt_dup(src_pt);
+
+                       if (new_pt == NULL)
+                               return -ENOMEM;
+
+                       new_pt->fence = dst;
+                       list_add(&new_pt->pt_list, &dst->pt_list_head);
+               }
+       }
+
+       return 0;
+}
+
+static void sync_fence_detach_pts(struct sync_fence *fence)
+{
+       struct list_head *pos, *n;
+
+       list_for_each_safe(pos, n, &fence->pt_list_head) {
+               struct sync_pt *pt = container_of(pos, struct sync_pt, pt_list);
+               sync_timeline_remove_pt(pt);
+       }
+}
+
+static void sync_fence_free_pts(struct sync_fence *fence)
+{
+       struct list_head *pos, *n;
+
+       list_for_each_safe(pos, n, &fence->pt_list_head) {
+               struct sync_pt *pt = container_of(pos, struct sync_pt, pt_list);
+               sync_pt_free(pt);
+       }
+}
+
+struct sync_fence *sync_fence_fdget(int fd)
+{
+       struct file *file = fget(fd);
+
+       if (file == NULL)
+               return NULL;
+
+       if (file->f_op != &sync_fence_fops)
+               goto err;
+
+       return file->private_data;
+
+err:
+       fput(file);
+       return NULL;
+}
+EXPORT_SYMBOL(sync_fence_fdget);
+
+void sync_fence_put(struct sync_fence *fence)
+{
+       fput(fence->file);
+}
+EXPORT_SYMBOL(sync_fence_put);
+
+void sync_fence_install(struct sync_fence *fence, int fd)
+{
+       fd_install(fd, fence->file);
+}
+EXPORT_SYMBOL(sync_fence_install);
+
+static int sync_fence_get_status(struct sync_fence *fence)
+{
+       struct list_head *pos;
+       int status = 1;
+
+       list_for_each(pos, &fence->pt_list_head) {
+               struct sync_pt *pt = container_of(pos, struct sync_pt, pt_list);
+               int pt_status = pt->status;
+
+               if (pt_status < 0) {
+                       status = pt_status;
+                       break;
+               } else if (status == 1) {
+                       status = pt_status;
+               }
+       }
+
+       return status;
+}
+
+struct sync_fence *sync_fence_merge(const char *name,
+                                   struct sync_fence *a, struct sync_fence *b)
+{
+       struct sync_fence *fence;
+       struct list_head *pos;
+       int err;
+
+       fence = sync_fence_alloc(name);
+       if (fence == NULL)
+               return NULL;
+
+       err = sync_fence_copy_pts(fence, a);
+       if (err < 0)
+               goto err;
+
+       err = sync_fence_merge_pts(fence, b);
+       if (err < 0)
+               goto err;
+
+       list_for_each(pos, &fence->pt_list_head) {
+               struct sync_pt *pt =
+                       container_of(pos, struct sync_pt, pt_list);
+               sync_pt_activate(pt);
+       }
+
+       /*
+        * signal the fence in case one of its pts had already signaled
+        * before it was activated
+        */
+       sync_fence_signal_pt(list_first_entry(&fence->pt_list_head,
+                                             struct sync_pt,
+                                             pt_list));
+
+       return fence;
+err:
+       sync_fence_free_pts(fence);
+       kfree(fence);
+       return NULL;
+}
+EXPORT_SYMBOL(sync_fence_merge);
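+
+/*
+ * Editor's worked example (not part of the original patch): merging a
+ * fence holding a pt at value 5 on timeline T with a fence holding a
+ * pt at value 8 on the same T collapses them into a single pt at 8,
+ * so the merged fence signals at the later of the two; pts on
+ * distinct timelines are all carried over unchanged.
+ */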
+
+static void sync_fence_signal_pt(struct sync_pt *pt)
+{
+       LIST_HEAD(signaled_waiters);
+       struct sync_fence *fence = pt->fence;
+       struct list_head *pos;
+       struct list_head *n;
+       unsigned long flags;
+       int status;
+
+       status = sync_fence_get_status(fence);
+
+       spin_lock_irqsave(&fence->waiter_list_lock, flags);
+       /*
+        * this should protect against two threads racing on the signaled
+        * false -> true transition
+        */
+       if (status && !fence->status) {
+               list_for_each_safe(pos, n, &fence->waiter_list_head)
+                       list_move(pos, &signaled_waiters);
+
+               fence->status = status;
+       } else {
+               status = 0;
+       }
+       spin_unlock_irqrestore(&fence->waiter_list_lock, flags);
+
+       if (status) {
+               list_for_each_safe(pos, n, &signaled_waiters) {
+                       struct sync_fence_waiter *waiter =
+                               container_of(pos, struct sync_fence_waiter,
+                                            waiter_list);
+
+                       list_del(pos);
+                       waiter->callback(fence, waiter);
+               }
+               wake_up(&fence->wq);
+       }
+}
+
+int sync_fence_wait_async(struct sync_fence *fence,
+                         struct sync_fence_waiter *waiter)
+{
+       unsigned long flags;
+       int err = 0;
+
+       spin_lock_irqsave(&fence->waiter_list_lock, flags);
+
+       if (fence->status) {
+               err = fence->status;
+               goto out;
+       }
+
+       list_add_tail(&waiter->waiter_list, &fence->waiter_list_head);
+out:
+       spin_unlock_irqrestore(&fence->waiter_list_lock, flags);
+
+       return err;
+}
+EXPORT_SYMBOL(sync_fence_wait_async);
+
+int sync_fence_cancel_async(struct sync_fence *fence,
+                            struct sync_fence_waiter *waiter)
+{
+       struct list_head *pos;
+       struct list_head *n;
+       unsigned long flags;
+       int ret = -ENOENT;
+
+       spin_lock_irqsave(&fence->waiter_list_lock, flags);
+       /*
+        * Make sure waiter is still in waiter_list because it is possible for
+        * the waiter to be removed from the list while the callback is still
+        * pending.
+        */
+       list_for_each_safe(pos, n, &fence->waiter_list_head) {
+               struct sync_fence_waiter *list_waiter =
+                       container_of(pos, struct sync_fence_waiter,
+                                    waiter_list);
+               if (list_waiter == waiter) {
+                       list_del(pos);
+                       ret = 0;
+                       break;
+               }
+       }
+       spin_unlock_irqrestore(&fence->waiter_list_lock, flags);
+       return ret;
+}
+EXPORT_SYMBOL(sync_fence_cancel_async);
+
+static bool sync_fence_check(struct sync_fence *fence)
+{
+       /*
+        * Make sure that reads to fence->status are ordered with the
+        * wait queue event triggering
+        */
+       smp_rmb();
+       return fence->status != 0;
+}
+
+int sync_fence_wait(struct sync_fence *fence, long timeout)
+{
+       int err = 0;
+       struct sync_pt *pt;
+
+       trace_sync_wait(fence, 1);
+       list_for_each_entry(pt, &fence->pt_list_head, pt_list)
+               trace_sync_pt(pt);
+
+       if (timeout > 0) {
+               timeout = msecs_to_jiffies(timeout);
+               err = wait_event_interruptible_timeout(fence->wq,
+                                                      sync_fence_check(fence),
+                                                      timeout);
+       } else if (timeout < 0) {
+               err = wait_event_interruptible(fence->wq,
+                                              sync_fence_check(fence));
+       }
+       trace_sync_wait(fence, 0);
+
+       if (err < 0)
+               return err;
+
+       if (fence->status < 0) {
+               pr_info("fence error %d on [%p]\n", fence->status, fence);
+               sync_dump();
+               return fence->status;
+       }
+
+       if (fence->status == 0) {
+               if (timeout > 0) {
+                       pr_info("fence timeout on [%p] after %dms\n", fence,
+                               jiffies_to_msecs(timeout));
+                       sync_dump();
+               }
+               return -ETIME;
+       }
+
+       return 0;
+}
+EXPORT_SYMBOL(sync_fence_wait);
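+
+/*
+ * Editor's sketch (illustrative, not part of the original patch): a
+ * consumer driver handed a fence fd from userspace would typically
+ * resolve and wait on it like this:
+ *
+ *   struct sync_fence *fence = sync_fence_fdget(fd);
+ *   if (fence == NULL)
+ *           return -EINVAL;
+ *   err = sync_fence_wait(fence, 1000); // ms; <0 waits forever, 0 polls
+ *   sync_fence_put(fence);
+ */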
+
+static void sync_fence_free(struct kref *kref)
+{
+       struct sync_fence *fence = container_of(kref, struct sync_fence, kref);
+
+       sync_fence_free_pts(fence);
+
+       kfree(fence);
+}
+
+static int sync_fence_release(struct inode *inode, struct file *file)
+{
+       struct sync_fence *fence = file->private_data;
+       unsigned long flags;
+
+       /*
+        * We need to remove all ways to access this fence before dropping
+        * our ref.
+        *
+        * start with its membership in the global fence list
+        */
+       spin_lock_irqsave(&sync_fence_list_lock, flags);
+       list_del(&fence->sync_fence_list);
+       spin_unlock_irqrestore(&sync_fence_list_lock, flags);
+
+       /*
+        * remove its pts from their parents so that sync_timeline_signal()
+        * can't reference the fence.
+        */
+       sync_fence_detach_pts(fence);
+
+       kref_put(&fence->kref, sync_fence_free);
+
+       return 0;
+}
+
+static unsigned int sync_fence_poll(struct file *file, poll_table *wait)
+{
+       struct sync_fence *fence = file->private_data;
+
+       poll_wait(file, &fence->wq, wait);
+
+       /*
+        * Make sure that reads to fence->status are ordered with the
+        * wait queue event triggering
+        */
+       smp_rmb();
+
+       if (fence->status == 1)
+               return POLLIN;
+       else if (fence->status < 0)
+               return POLLERR;
+       else
+               return 0;
+}
+
+static long sync_fence_ioctl_wait(struct sync_fence *fence, unsigned long arg)
+{
+       __s32 value;
+
+       if (copy_from_user(&value, (void __user *)arg, sizeof(value)))
+               return -EFAULT;
+
+       return sync_fence_wait(fence, value);
+}
+
+static long sync_fence_ioctl_merge(struct sync_fence *fence, unsigned long arg)
+{
+       int fd = get_unused_fd();
+       int err;
+       struct sync_fence *fence2, *fence3;
+       struct sync_merge_data data;
+
+       if (fd < 0)
+               return fd;
+
+       if (copy_from_user(&data, (void __user *)arg, sizeof(data))) {
+               err = -EFAULT;
+               goto err_put_fd;
+       }
+
+       fence2 = sync_fence_fdget(data.fd2);
+       if (fence2 == NULL) {
+               err = -ENOENT;
+               goto err_put_fd;
+       }
+
+       data.name[sizeof(data.name) - 1] = '\0';
+       fence3 = sync_fence_merge(data.name, fence, fence2);
+       if (fence3 == NULL) {
+               err = -ENOMEM;
+               goto err_put_fence2;
+       }
+
+       data.fence = fd;
+       if (copy_to_user((void __user *)arg, &data, sizeof(data))) {
+               err = -EFAULT;
+               goto err_put_fence3;
+       }
+
+       sync_fence_install(fence3, fd);
+       sync_fence_put(fence2);
+       return 0;
+
+err_put_fence3:
+       sync_fence_put(fence3);
+
+err_put_fence2:
+       sync_fence_put(fence2);
+
+err_put_fd:
+       put_unused_fd(fd);
+       return err;
+}
+
+static int sync_fill_pt_info(struct sync_pt *pt, void *data, int size)
+{
+       struct sync_pt_info *info = data;
+       int ret;
+
+       if (size < sizeof(struct sync_pt_info))
+               return -ENOMEM;
+
+       info->len = sizeof(struct sync_pt_info);
+
+       if (pt->parent->ops->fill_driver_data) {
+               ret = pt->parent->ops->fill_driver_data(pt, info->driver_data,
+                                                       size - sizeof(*info));
+               if (ret < 0)
+                       return ret;
+
+               info->len += ret;
+       }
+
+       strlcpy(info->obj_name, pt->parent->name, sizeof(info->obj_name));
+       strlcpy(info->driver_name, pt->parent->ops->driver_name,
+               sizeof(info->driver_name));
+       info->status = pt->status;
+       info->timestamp_ns = ktime_to_ns(pt->timestamp);
+
+       return info->len;
+}
+
+static long sync_fence_ioctl_fence_info(struct sync_fence *fence,
+                                       unsigned long arg)
+{
+       struct sync_fence_info_data *data;
+       struct list_head *pos;
+       __u32 size;
+       __u32 len = 0;
+       int ret;
+
+       if (copy_from_user(&size, (void __user *)arg, sizeof(size)))
+               return -EFAULT;
+
+       if (size < sizeof(struct sync_fence_info_data))
+               return -EINVAL;
+
+       if (size > 4096)
+               size = 4096;
+
+       data = kzalloc(size, GFP_KERNEL);
+       if (data == NULL)
+               return -ENOMEM;
+
+       strlcpy(data->name, fence->name, sizeof(data->name));
+       data->status = fence->status;
+       len = sizeof(struct sync_fence_info_data);
+
+       list_for_each(pos, &fence->pt_list_head) {
+               struct sync_pt *pt =
+                       container_of(pos, struct sync_pt, pt_list);
+
+               ret = sync_fill_pt_info(pt, (u8 *)data + len, size - len);
+
+               if (ret < 0)
+                       goto out;
+
+               len += ret;
+       }
+
+       data->len = len;
+
+       if (copy_to_user((void __user *)arg, data, len))
+               ret = -EFAULT;
+       else
+               ret = 0;
+
+out:
+       kfree(data);
+
+       return ret;
+}
+
+static long sync_fence_ioctl(struct file *file, unsigned int cmd,
+                            unsigned long arg)
+{
+       struct sync_fence *fence = file->private_data;
+       switch (cmd) {
+       case SYNC_IOC_WAIT:
+               return sync_fence_ioctl_wait(fence, arg);
+
+       case SYNC_IOC_MERGE:
+               return sync_fence_ioctl_merge(fence, arg);
+
+       case SYNC_IOC_FENCE_INFO:
+               return sync_fence_ioctl_fence_info(fence, arg);
+
+       default:
+               return -ENOTTY;
+       }
+}
+
+#ifdef CONFIG_DEBUG_FS
+static const char *sync_status_str(int status)
+{
+       if (status > 0)
+               return "signaled";
+       else if (status == 0)
+               return "active";
+       else
+               return "error";
+}
+
+static void sync_print_pt(struct seq_file *s, struct sync_pt *pt, bool fence)
+{
+       int status = pt->status;
+       seq_printf(s, "  %s%spt %s",
+                  fence ? pt->parent->name : "",
+                  fence ? "_" : "",
+                  sync_status_str(status));
+       if (pt->status) {
+               struct timeval tv = ktime_to_timeval(pt->timestamp);
+               seq_printf(s, "@%ld.%06ld", tv.tv_sec, tv.tv_usec);
+       }
+
+       if (pt->parent->ops->timeline_value_str &&
+           pt->parent->ops->pt_value_str) {
+               char value[64];
+               pt->parent->ops->pt_value_str(pt, value, sizeof(value));
+               seq_printf(s, ": %s", value);
+               if (fence) {
+                       pt->parent->ops->timeline_value_str(pt->parent, value,
+                                                   sizeof(value));
+                       seq_printf(s, " / %s", value);
+               }
+       } else if (pt->parent->ops->print_pt) {
+               seq_printf(s, ": ");
+               pt->parent->ops->print_pt(s, pt);
+       }
+
+       seq_printf(s, "\n");
+}
+
+static void sync_print_obj(struct seq_file *s, struct sync_timeline *obj)
+{
+       struct list_head *pos;
+       unsigned long flags;
+
+       seq_printf(s, "%s %s", obj->name, obj->ops->driver_name);
+
+       if (obj->ops->timeline_value_str) {
+               char value[64];
+               obj->ops->timeline_value_str(obj, value, sizeof(value));
+               seq_printf(s, ": %s", value);
+       } else if (obj->ops->print_obj) {
+               seq_printf(s, ": ");
+               obj->ops->print_obj(s, obj);
+       }
+
+       seq_printf(s, "\n");
+
+       spin_lock_irqsave(&obj->child_list_lock, flags);
+       list_for_each(pos, &obj->child_list_head) {
+               struct sync_pt *pt =
+                       container_of(pos, struct sync_pt, child_list);
+               sync_print_pt(s, pt, false);
+       }
+       spin_unlock_irqrestore(&obj->child_list_lock, flags);
+}
+
+static void sync_print_fence(struct seq_file *s, struct sync_fence *fence)
+{
+       struct list_head *pos;
+       unsigned long flags;
+
+       seq_printf(s, "[%p] %s: %s\n", fence, fence->name,
+                  sync_status_str(fence->status));
+
+       list_for_each(pos, &fence->pt_list_head) {
+               struct sync_pt *pt =
+                       container_of(pos, struct sync_pt, pt_list);
+               sync_print_pt(s, pt, true);
+       }
+
+       spin_lock_irqsave(&fence->waiter_list_lock, flags);
+       list_for_each(pos, &fence->waiter_list_head) {
+               struct sync_fence_waiter *waiter =
+                       container_of(pos, struct sync_fence_waiter,
+                                    waiter_list);
+
+               seq_printf(s, "waiter %pF\n", waiter->callback);
+       }
+       spin_unlock_irqrestore(&fence->waiter_list_lock, flags);
+}
+
+static int sync_debugfs_show(struct seq_file *s, void *unused)
+{
+       unsigned long flags;
+       struct list_head *pos;
+
+       seq_printf(s, "objs:\n--------------\n");
+
+       spin_lock_irqsave(&sync_timeline_list_lock, flags);
+       list_for_each(pos, &sync_timeline_list_head) {
+               struct sync_timeline *obj =
+                       container_of(pos, struct sync_timeline,
+                                    sync_timeline_list);
+
+               sync_print_obj(s, obj);
+               seq_printf(s, "\n");
+       }
+       spin_unlock_irqrestore(&sync_timeline_list_lock, flags);
+
+       seq_printf(s, "fences:\n--------------\n");
+
+       spin_lock_irqsave(&sync_fence_list_lock, flags);
+       list_for_each(pos, &sync_fence_list_head) {
+               struct sync_fence *fence =
+                       container_of(pos, struct sync_fence, sync_fence_list);
+
+               sync_print_fence(s, fence);
+               seq_printf(s, "\n");
+       }
+       spin_unlock_irqrestore(&sync_fence_list_lock, flags);
+       return 0;
+}
+
+static int sync_debugfs_open(struct inode *inode, struct file *file)
+{
+       return single_open(file, sync_debugfs_show, inode->i_private);
+}
+
+static const struct file_operations sync_debugfs_fops = {
+       .open           = sync_debugfs_open,
+       .read           = seq_read,
+       .llseek         = seq_lseek,
+       .release        = single_release,
+};
+
+static __init int sync_debugfs_init(void)
+{
+       debugfs_create_file("sync", S_IRUGO, NULL, NULL, &sync_debugfs_fops);
+       return 0;
+}
+late_initcall(sync_debugfs_init);
+
+#define DUMP_CHUNK 256
+static char sync_dump_buf[64 * 1024];
+void sync_dump(void)
+{
+       struct seq_file s = {
+               .buf = sync_dump_buf,
+               .size = sizeof(sync_dump_buf) - 1,
+       };
+       int i;
+
+       sync_debugfs_show(&s, NULL);
+
+       for (i = 0; i < s.count; i += DUMP_CHUNK) {
+               if ((s.count - i) > DUMP_CHUNK) {
+                       char c = s.buf[i + DUMP_CHUNK];
+                       s.buf[i + DUMP_CHUNK] = 0;
+                       pr_cont("%s", s.buf + i);
+                       s.buf[i + DUMP_CHUNK] = c;
+               } else {
+                       s.buf[s.count] = 0;
+                       pr_cont("%s", s.buf + i);
+               }
+       }
+}
+#else
+static void sync_dump(void)
+{
+}
+#endif
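+
+/*
+ * Editor's note (illustrative, not part of the original patch): with
+ * CONFIG_DEBUG_FS enabled and debugfs mounted at the usual location,
+ * the state dumped by sync_debugfs_show() above is readable at
+ * runtime via:
+ *
+ *   cat /sys/kernel/debug/sync
+ */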
index 72bedad6bf8cca35cc93593f5112d71216ed677a..f382896fb2b920e7f4cd76ca7ea9712e6382fdb0 100644 (file)
@@ -6,6 +6,19 @@ menu "Character devices"
 
 source "drivers/tty/Kconfig"
 
+config DEVMEM
+       bool "Memory device driver"
+       default y
+       help
+         The memory driver provides two character devices, mem and kmem, which
+         provide access to the system's memory. The mem device is a view of
+         physical memory, and each byte in the device corresponds to the
+         matching physical address. The kmem device is the same as mem, but
+         the addresses correspond to the kernel's virtual address space rather
+         than physical memory. These devices are standard parts of a Linux
+         system and most users should say Y here. You might say N if you
+         are very security conscious or memory is tight.
+
 config DEVKMEM
        bool "/dev/kmem virtual device support"
        default y
@@ -583,6 +596,10 @@ config DEVPORT
        depends on ISA || PCI
        default y
 
+config DCC_TTY
+       tristate "DCC tty driver"
+       depends on ARM
+
 source "drivers/s390/char/Kconfig"
 
 config MSM_SMD_PKT
index 7ff1d0d208a7c3f89f6827fd3e9ba8815dad202d..e0047ed1e74c3b20f8086d6856f82b16876ab442 100644 (file)
@@ -56,6 +56,7 @@ obj-$(CONFIG_PCMCIA)          += pcmcia/
 obj-$(CONFIG_HANGCHECK_TIMER)  += hangcheck-timer.o
 obj-$(CONFIG_TCG_TPM)          += tpm/
 
+obj-$(CONFIG_DCC_TTY)          += dcc_tty.o
 obj-$(CONFIG_PS3_FLASH)                += ps3flash.o
 
 obj-$(CONFIG_JS_RTC)           += js-rtc.o
diff --git a/drivers/char/dcc_tty.c b/drivers/char/dcc_tty.c
new file mode 100644 (file)
index 0000000..a787acc
--- /dev/null
@@ -0,0 +1,326 @@
+/* drivers/char/dcc_tty.c
+ *
+ * Copyright (C) 2007 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/delay.h>
+#include <linux/console.h>
+#include <linux/hrtimer.h>
+#include <linux/tty.h>
+#include <linux/tty_driver.h>
+#include <linux/tty_flip.h>
+
+MODULE_DESCRIPTION("DCC TTY Driver");
+MODULE_LICENSE("GPL");
+MODULE_VERSION("1.0");
+
+static DEFINE_SPINLOCK(g_dcc_tty_lock);
+static struct hrtimer g_dcc_timer;
+static char g_dcc_buffer[16];
+static int g_dcc_buffer_head;
+static int g_dcc_buffer_count;
+static unsigned g_dcc_write_delay_usecs = 1;
+static struct tty_driver *g_dcc_tty_driver;
+static struct tty_struct *g_dcc_tty;
+static int g_dcc_tty_open_count;
+
+static void dcc_poll_locked(void)
+{
+       char ch;
+       int rch;
+       int written;
+
+       while (g_dcc_buffer_count) {
+               ch = g_dcc_buffer[g_dcc_buffer_head];
+               asm(
+                       "mrc 14, 0, r15, c0, c1, 0\n"
+                       "mcrcc 14, 0, %1, c0, c5, 0\n"
+                       "movcc %0, #1\n"
+                       "movcs %0, #0\n"
+                       : "=r" (written)
+                       : "r" (ch)
+                       : "cc"
+               );
+               if (written) {
+                       if (ch == '\n')
+                               g_dcc_buffer[g_dcc_buffer_head] = '\r';
+                       else {
+                               g_dcc_buffer_head = (g_dcc_buffer_head + 1) % ARRAY_SIZE(g_dcc_buffer);
+                               g_dcc_buffer_count--;
+                               if (g_dcc_tty)
+                                       tty_wakeup(g_dcc_tty);
+                       }
+                       g_dcc_write_delay_usecs = 1;
+               } else {
+                       if (g_dcc_write_delay_usecs > 0x100)
+                               break;
+                       g_dcc_write_delay_usecs <<= 1;
+                       udelay(g_dcc_write_delay_usecs);
+               }
+       }
+
+       if (g_dcc_tty && !test_bit(TTY_THROTTLED, &g_dcc_tty->flags)) {
+               asm(
+                       "mrc 14, 0, %0, c0, c1, 0\n"
+                       "tst %0, #(1 << 30)\n"
+                       "moveq %0, #-1\n"
+                       "mrcne 14, 0, %0, c0, c5, 0\n"
+                       : "=r" (rch)
+                       :
+                       : "cc"
+               );
+               if (rch >= 0) {
+                       ch = rch;
+                       tty_insert_flip_string(g_dcc_tty, &ch, 1);
+                       tty_flip_buffer_push(g_dcc_tty);
+               }
+       }
+
+       if (g_dcc_buffer_count)
+               hrtimer_start(&g_dcc_timer, ktime_set(0, g_dcc_write_delay_usecs * NSEC_PER_USEC), HRTIMER_MODE_REL);
+       else
+               hrtimer_start(&g_dcc_timer, ktime_set(0, 20 * NSEC_PER_MSEC), HRTIMER_MODE_REL);
+}
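+
+/*
+ * Editor's note on the inline asm above (not part of the original
+ * patch): "mrc 14, 0, r15, c0, c1, 0" loads the DCC status register
+ * into the condition flags (Rt == r15 targets the flags), so "mcrcc"
+ * writes the character to the DCC data register (c0, c5, 0) only when
+ * the TX-full bit (bit 29) is clear; the receive block likewise tests
+ * the RX-full bit (bit 30) and reads a character only when one is
+ * pending.
+ */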
+
+static int dcc_tty_open(struct tty_struct * tty, struct file * filp)
+{
+       int ret;
+       unsigned long irq_flags;
+
+       spin_lock_irqsave(&g_dcc_tty_lock, irq_flags);
+       if (g_dcc_tty == NULL || g_dcc_tty == tty) {
+               g_dcc_tty = tty;
+               g_dcc_tty_open_count++;
+               ret = 0;
+       } else
+               ret = -EBUSY;
+       spin_unlock_irqrestore(&g_dcc_tty_lock, irq_flags);
+
+       printk("dcc_tty_open, tty %p, f_flags %x, returned %d\n", tty, filp->f_flags, ret);
+
+       return ret;
+}
+
+static void dcc_tty_close(struct tty_struct * tty, struct file * filp)
+{
+       printk("dcc_tty_close, tty %p, f_flags %x\n", tty, filp->f_flags);
+       if (g_dcc_tty == tty) {
+               if (--g_dcc_tty_open_count == 0)
+                       g_dcc_tty = NULL;
+       }
+}
+
+static int dcc_write(const unsigned char *buf_start, int count)
+{
+       const unsigned char *buf = buf_start;
+       unsigned long irq_flags;
+       int copy_len;
+       int space_left;
+       int tail;
+
+       if (count < 1)
+               return 0;
+
+       spin_lock_irqsave(&g_dcc_tty_lock, irq_flags);
+       do {
+               tail = (g_dcc_buffer_head + g_dcc_buffer_count) % ARRAY_SIZE(g_dcc_buffer);
+               copy_len = ARRAY_SIZE(g_dcc_buffer) - tail;
+               space_left = ARRAY_SIZE(g_dcc_buffer) - g_dcc_buffer_count;
+               if (copy_len > space_left)
+                       copy_len = space_left;
+               if (copy_len > count)
+                       copy_len = count;
+               memcpy(&g_dcc_buffer[tail], buf, copy_len);
+               g_dcc_buffer_count += copy_len;
+               buf += copy_len;
+               count -= copy_len;
+               if (copy_len < count && copy_len < space_left) {
+                       space_left -= copy_len;
+                       copy_len = count;
+                       if (copy_len > space_left) {
+                               copy_len = space_left;
+                       }
+                       memcpy(g_dcc_buffer, buf, copy_len);
+                       buf += copy_len;
+                       count -= copy_len;
+                       g_dcc_buffer_count += copy_len;
+               }
+               dcc_poll_locked();
+               space_left = ARRAY_SIZE(g_dcc_buffer) - g_dcc_buffer_count;
+       } while (count && space_left);
+       spin_unlock_irqrestore(&g_dcc_tty_lock, irq_flags);
+       return buf - buf_start;
+}
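+
+/*
+ * Editor's note (not part of the original patch): dcc_write() treats
+ * g_dcc_buffer as a 16-byte ring; the second memcpy in the loop above
+ * handles the wrap-around case, continuing the copy from index 0 once
+ * it runs past the end of the array.
+ */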
+
+static int dcc_tty_write(struct tty_struct * tty, const unsigned char *buf, int count)
+{
+       int ret;
+       /* printk("dcc_tty_write %p, %d\n", buf, count); */
+       ret = dcc_write(buf, count);
+       if (ret != count)
+               printk("dcc_tty_write %p, %d, returned %d\n", buf, count, ret);
+       return ret;
+}
+
+static int dcc_tty_write_room(struct tty_struct *tty)
+{
+       int space_left;
+       unsigned long irq_flags;
+
+       spin_lock_irqsave(&g_dcc_tty_lock, irq_flags);
+       space_left = ARRAY_SIZE(g_dcc_buffer) - g_dcc_buffer_count;
+       spin_unlock_irqrestore(&g_dcc_tty_lock, irq_flags);
+       return space_left;
+}
+
+static int dcc_tty_chars_in_buffer(struct tty_struct *tty)
+{
+       int ret;
+       asm(
+               "mrc 14, 0, %0, c0, c1, 0\n"
+               "mov %0, %0, LSR #30\n"
+               "and %0, %0, #1\n"
+               : "=r" (ret)
+       );
+       return ret;
+}
+
+static void dcc_tty_unthrottle(struct tty_struct * tty)
+{
+       unsigned long irq_flags;
+
+       spin_lock_irqsave(&g_dcc_tty_lock, irq_flags);
+       dcc_poll_locked();
+       spin_unlock_irqrestore(&g_dcc_tty_lock, irq_flags);
+}
+
+static enum hrtimer_restart dcc_tty_timer_func(struct hrtimer *timer)
+{
+       unsigned long irq_flags;
+
+       spin_lock_irqsave(&g_dcc_tty_lock, irq_flags);
+       dcc_poll_locked();
+       spin_unlock_irqrestore(&g_dcc_tty_lock, irq_flags);
+       return HRTIMER_NORESTART;
+}
+
+void dcc_console_write(struct console *co, const char *b, unsigned count)
+{
+#if 1
+       dcc_write(b, count);
+#else
+       /* blocking printk */
+       while (count > 0) {
+               int written;
+               written = dcc_write(b, count);
+               if (written) {
+                       b += written;
+                       count -= written;
+               }
+       }
+#endif
+}
+
+static struct tty_driver *dcc_console_device(struct console *c, int *index)
+{
+       *index = 0;
+       return g_dcc_tty_driver;
+}
+
+static int __init dcc_console_setup(struct console *co, char *options)
+{
+       if (co->index != 0)
+               return -ENODEV;
+       return 0;
+}
+
+static struct console dcc_console = {
+       .name           = "ttyDCC",
+       .write          = dcc_console_write,
+       .device         = dcc_console_device,
+       .setup          = dcc_console_setup,
+       .flags          = CON_PRINTBUFFER,
+       .index          = -1,
+};
+
+static struct tty_operations dcc_tty_ops = {
+       .open = dcc_tty_open,
+       .close = dcc_tty_close,
+       .write = dcc_tty_write,
+       .write_room = dcc_tty_write_room,
+       .chars_in_buffer = dcc_tty_chars_in_buffer,
+       .unthrottle = dcc_tty_unthrottle,
+};
+
+static int __init dcc_tty_init(void)
+{
+       int ret;
+
+       hrtimer_init(&g_dcc_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+       g_dcc_timer.function = dcc_tty_timer_func;
+
+       g_dcc_tty_driver = alloc_tty_driver(1);
+       if (!g_dcc_tty_driver) {
+               printk(KERN_ERR "dcc_tty_probe: alloc_tty_driver failed\n");
+               ret = -ENOMEM;
+               goto err_alloc_tty_driver_failed;
+       }
+       g_dcc_tty_driver->owner = THIS_MODULE;
+       g_dcc_tty_driver->driver_name = "dcc";
+       g_dcc_tty_driver->name = "ttyDCC";
+       g_dcc_tty_driver->major = 0; /* auto assign */
+       g_dcc_tty_driver->minor_start = 0;
+       g_dcc_tty_driver->type = TTY_DRIVER_TYPE_SERIAL;
+       g_dcc_tty_driver->subtype = SERIAL_TYPE_NORMAL;
+       g_dcc_tty_driver->init_termios = tty_std_termios;
+       g_dcc_tty_driver->flags = TTY_DRIVER_RESET_TERMIOS | TTY_DRIVER_REAL_RAW | TTY_DRIVER_DYNAMIC_DEV;
+       tty_set_operations(g_dcc_tty_driver, &dcc_tty_ops);
+       ret = tty_register_driver(g_dcc_tty_driver);
+       if (ret) {
+               printk(KERN_ERR "dcc_tty_probe: tty_register_driver failed, %d\n", ret);
+               goto err_tty_register_driver_failed;
+       }
+       tty_register_device(g_dcc_tty_driver, 0, NULL);
+
+       register_console(&dcc_console);
+       hrtimer_start(&g_dcc_timer, ktime_set(0, 0), HRTIMER_MODE_REL);
+
+       return 0;
+
+err_tty_register_driver_failed:
+       put_tty_driver(g_dcc_tty_driver);
+       g_dcc_tty_driver = NULL;
+err_alloc_tty_driver_failed:
+       return ret;
+}
+
+static void  __exit dcc_tty_exit(void)
+{
+       int ret;
+
+       tty_unregister_device(g_dcc_tty_driver, 0);
+       ret = tty_unregister_driver(g_dcc_tty_driver);
+       if (ret < 0) {
+               printk(KERN_ERR "dcc_tty_remove: tty_unregister_driver failed, %d\n", ret);
+       } else {
+               put_tty_driver(g_dcc_tty_driver);
+       }
+       g_dcc_tty_driver = NULL;
+}
+
+module_init(dcc_tty_init);
+module_exit(dcc_tty_exit);
+
index c6fa3bc2baa89cfec662945dcc9818dadfe0d44c..67238abc3776b20a045046a503c85ec89a9bcc32 100644 (file)
@@ -59,6 +59,7 @@ static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t size)
 }
 #endif
 
+#if defined(CONFIG_DEVMEM) || defined(CONFIG_DEVKMEM)
 #ifdef CONFIG_STRICT_DEVMEM
 static inline int range_is_allowed(unsigned long pfn, unsigned long size)
 {
@@ -84,7 +85,9 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
        return 1;
 }
 #endif
+#endif
 
+#ifdef CONFIG_DEVMEM
 void __weak unxlate_dev_mem_ptr(unsigned long phys, void *addr)
 {
 }
@@ -211,6 +214,9 @@ static ssize_t write_mem(struct file *file, const char __user *buf,
        *ppos += written;
        return written;
 }
+#endif /* CONFIG_DEVMEM */
+
+#if defined(CONFIG_DEVMEM) || defined(CONFIG_DEVKMEM)
 
 int __weak phys_mem_access_prot_allowed(struct file *file,
        unsigned long pfn, unsigned long size, pgprot_t *vma_prot)
@@ -332,6 +338,7 @@ static int mmap_mem(struct file *file, struct vm_area_struct *vma)
        }
        return 0;
 }
+#endif /* CONFIG_DEVMEM || CONFIG_DEVKMEM */
 
 #ifdef CONFIG_DEVKMEM
 static int mmap_kmem(struct file *file, struct vm_area_struct *vma)
@@ -696,6 +703,8 @@ static loff_t null_lseek(struct file *file, loff_t offset, int orig)
        return file->f_pos = 0;
 }
 
+#if defined(CONFIG_DEVMEM) || defined(CONFIG_DEVKMEM) || defined(CONFIG_DEVPORT)
+
 /*
  * The memory devices use the full 32/64 bits of the offset, and so we cannot
  * check against negative addresses: they are ok. The return value is weird,
@@ -729,10 +738,14 @@ static loff_t memory_lseek(struct file *file, loff_t offset, int orig)
        return ret;
 }
 
+#endif
+
+#if defined(CONFIG_DEVMEM) || defined(CONFIG_DEVKMEM) || defined(CONFIG_DEVPORT)
 static int open_port(struct inode * inode, struct file * filp)
 {
        return capable(CAP_SYS_RAWIO) ? 0 : -EPERM;
 }
+#endif
 
 #define zero_lseek     null_lseek
 #define full_lseek      null_lseek
@@ -742,6 +755,7 @@ static int open_port(struct inode * inode, struct file * filp)
 #define open_kmem      open_mem
 #define open_oldmem    open_mem
 
+#ifdef CONFIG_DEVMEM
 static const struct file_operations mem_fops = {
        .llseek         = memory_lseek,
        .read           = read_mem,
@@ -750,6 +764,7 @@ static const struct file_operations mem_fops = {
        .open           = open_mem,
        .get_unmapped_area = get_unmapped_area_mem,
 };
+#endif
 
 #ifdef CONFIG_DEVKMEM
 static const struct file_operations kmem_fops = {
@@ -815,7 +830,9 @@ static const struct memdev {
        const struct file_operations *fops;
        struct backing_dev_info *dev_info;
 } devlist[] = {
+#ifdef CONFIG_DEVMEM
         [1] = { "mem", 0, &mem_fops, &directly_mappable_cdev_bdi },
+#endif
 #ifdef CONFIG_DEVKMEM
         [2] = { "kmem", 0, &kmem_fops, &directly_mappable_cdev_bdi },
 #endif
index cbcb21e32771df7bf6fb0b18ce9033f5cdc779a5..d0a97071d4bf9bfa7db943b33fd826ec408e5efd 100644 (file)
@@ -102,6 +102,16 @@ config CPU_FREQ_DEFAULT_GOV_CONSERVATIVE
          Be aware that not all cpufreq drivers support the conservative
          governor. If unsure have a look at the help section of the
          driver. Fallback governor will be the performance governor.
+
+config CPU_FREQ_DEFAULT_GOV_INTERACTIVE
+       bool "interactive"
+       select CPU_FREQ_GOV_INTERACTIVE
+       help
+         Use the CPUFreq governor 'interactive' as default. This allows
+         you to get a full dynamic cpu frequency capable system by simply
+         loading your cpufreq low-level hardware driver, using the
+         'interactive' governor for latency-sensitive workloads.
+
 endchoice
 
 config CPU_FREQ_GOV_PERFORMANCE
@@ -160,6 +170,23 @@ config CPU_FREQ_GOV_ONDEMAND
 
          If in doubt, say N.
 
+config CPU_FREQ_GOV_INTERACTIVE
+       tristate "'interactive' cpufreq policy governor"
+       help
+         'interactive' - This driver adds a dynamic cpufreq policy governor
+         designed for latency-sensitive workloads.
+
+         This governor attempts to reduce the latency of clock
+         increases so that the system is more responsive to
+         interactive workloads.
+
+         To compile this driver as a module, choose M here: the
+         module will be called cpufreq_interactive.
+
+         For details, take a look at linux/Documentation/cpu-freq.
+
+         If in doubt, say N.
+
 config CPU_FREQ_GOV_CONSERVATIVE
        tristate "'conservative' cpufreq governor"
        depends on CPU_FREQ
index fadc4d496e2fc55383c2f9ddf6c59b9ff2072ec3..64f1c2d382bcb6ad06464e458437befff8528855 100644 (file)
@@ -9,6 +9,7 @@ obj-$(CONFIG_CPU_FREQ_GOV_POWERSAVE)    += cpufreq_powersave.o
 obj-$(CONFIG_CPU_FREQ_GOV_USERSPACE)   += cpufreq_userspace.o
 obj-$(CONFIG_CPU_FREQ_GOV_ONDEMAND)    += cpufreq_ondemand.o
 obj-$(CONFIG_CPU_FREQ_GOV_CONSERVATIVE)        += cpufreq_conservative.o
+obj-$(CONFIG_CPU_FREQ_GOV_INTERACTIVE) += cpufreq_interactive.o
 obj-$(CONFIG_CPU_FREQ_GOV_COMMON)              += cpufreq_governor.o
 
 # CPUfreq cross-arch helpers
diff --git a/drivers/cpufreq/cpufreq_interactive.c b/drivers/cpufreq/cpufreq_interactive.c
new file mode 100644 (file)
index 0000000..7d1952c
--- /dev/null
@@ -0,0 +1,1066 @@
+/*
+ * drivers/cpufreq/cpufreq_interactive.c
+ *
+ * Copyright (C) 2010 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * Author: Mike Chan (mike@android.com)
+ *
+ */
+
+#include <linux/cpu.h>
+#include <linux/cpumask.h>
+#include <linux/cpufreq.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/rwsem.h>
+#include <linux/sched.h>
+#include <linux/tick.h>
+#include <linux/time.h>
+#include <linux/timer.h>
+#include <linux/workqueue.h>
+#include <linux/kthread.h>
+#include <linux/slab.h>
+#include <asm/cputime.h>
+
+#define CREATE_TRACE_POINTS
+#include <trace/events/cpufreq_interactive.h>
+
+static int active_count;
+
+struct cpufreq_interactive_cpuinfo {
+       struct timer_list cpu_timer;
+       struct timer_list cpu_slack_timer;
+       spinlock_t load_lock; /* protects the next 4 fields */
+       u64 time_in_idle;
+       u64 time_in_idle_timestamp;
+       u64 cputime_speedadj;
+       u64 cputime_speedadj_timestamp;
+       struct cpufreq_policy *policy;
+       struct cpufreq_frequency_table *freq_table;
+       unsigned int target_freq;
+       unsigned int floor_freq;
+       u64 floor_validate_time;
+       u64 hispeed_validate_time;
+       struct rw_semaphore enable_sem;
+       int governor_enabled;
+};
+
+static DEFINE_PER_CPU(struct cpufreq_interactive_cpuinfo, cpuinfo);
+
+/* realtime thread handles frequency scaling */
+static struct task_struct *speedchange_task;
+static cpumask_t speedchange_cpumask;
+static spinlock_t speedchange_cpumask_lock;
+static struct mutex gov_lock;
+
+/* Hi speed to bump to from lo speed when a load burst occurs (default max) */
+static unsigned int hispeed_freq;
+
+/* Go to hi speed when CPU load at or above this value. */
+#define DEFAULT_GO_HISPEED_LOAD 99
+static unsigned long go_hispeed_load = DEFAULT_GO_HISPEED_LOAD;
+
+/* Target load.  Lower values result in higher CPU speeds. */
+#define DEFAULT_TARGET_LOAD 90
+static unsigned int default_target_loads[] = {DEFAULT_TARGET_LOAD};
+static spinlock_t target_loads_lock;
+static unsigned int *target_loads = default_target_loads;
+static int ntarget_loads = ARRAY_SIZE(default_target_loads);
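+
+/*
+ * Editor's note (not part of the original patch): target_loads is a
+ * flattened array of {load, freq, load, freq, ...} pairs, which is
+ * why freq_to_targetload() below steps through it two entries at a
+ * time; with the single-entry default, every frequency uses
+ * DEFAULT_TARGET_LOAD.
+ */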
+
+/*
+ * The minimum amount of time to spend at a frequency before we can ramp down.
+ */
+#define DEFAULT_MIN_SAMPLE_TIME (80 * USEC_PER_MSEC)
+static unsigned long min_sample_time = DEFAULT_MIN_SAMPLE_TIME;
+
+/*
+ * The sample rate of the timer used to increase frequency
+ */
+#define DEFAULT_TIMER_RATE (20 * USEC_PER_MSEC)
+static unsigned long timer_rate = DEFAULT_TIMER_RATE;
+
+/*
+ * Wait this long before raising speed above hispeed, by default a single
+ * timer interval.
+ */
+#define DEFAULT_ABOVE_HISPEED_DELAY DEFAULT_TIMER_RATE
+static unsigned long above_hispeed_delay_val = DEFAULT_ABOVE_HISPEED_DELAY;
+
+/* Non-zero means indefinite speed boost active */
+static int boost_val;
+/* Duration of a boost pulse in usecs */
+static int boostpulse_duration_val = DEFAULT_MIN_SAMPLE_TIME;
+/* End time of boost pulse in ktime converted to usecs */
+static u64 boostpulse_endtime;
+
+/*
+ * Max additional time to wait in idle, beyond timer_rate, at speeds above
+ * minimum before wakeup to reduce speed, or -1 if unnecessary.
+ */
+#define DEFAULT_TIMER_SLACK (4 * DEFAULT_TIMER_RATE)
+static int timer_slack_val = DEFAULT_TIMER_SLACK;
+
+static int cpufreq_governor_interactive(struct cpufreq_policy *policy,
+               unsigned int event);
+
+#ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_INTERACTIVE
+static
+#endif
+struct cpufreq_governor cpufreq_gov_interactive = {
+       .name = "interactive",
+       .governor = cpufreq_governor_interactive,
+       .max_transition_latency = 10000000,
+       .owner = THIS_MODULE,
+};
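+
+/*
+ * Editor's note (illustrative, not part of the original patch): once
+ * built in or loaded as a module, the governor is selected per policy
+ * through the standard cpufreq sysfs interface, e.g.:
+ *
+ *   echo interactive > /sys/devices/system/cpu/cpu0/cpufreq/scaling_governor
+ */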
+
+static void cpufreq_interactive_timer_resched(
+       struct cpufreq_interactive_cpuinfo *pcpu)
+{
+       unsigned long expires = jiffies + usecs_to_jiffies(timer_rate);
+       unsigned long flags;
+
+       mod_timer_pinned(&pcpu->cpu_timer, expires);
+       if (timer_slack_val >= 0 && pcpu->target_freq > pcpu->policy->min) {
+               expires += usecs_to_jiffies(timer_slack_val);
+               mod_timer_pinned(&pcpu->cpu_slack_timer, expires);
+       }
+
+       spin_lock_irqsave(&pcpu->load_lock, flags);
+       pcpu->time_in_idle =
+               get_cpu_idle_time_us(smp_processor_id(),
+                                    &pcpu->time_in_idle_timestamp);
+       pcpu->cputime_speedadj = 0;
+       pcpu->cputime_speedadj_timestamp = pcpu->time_in_idle_timestamp;
+       spin_unlock_irqrestore(&pcpu->load_lock, flags);
+}
+
+static unsigned int freq_to_targetload(unsigned int freq)
+{
+       int i;
+       unsigned int ret;
+       unsigned long flags;
+
+       spin_lock_irqsave(&target_loads_lock, flags);
+
+       for (i = 0; i < ntarget_loads - 1 && freq >= target_loads[i+1]; i += 2)
+               ;
+
+       ret = target_loads[i];
+       spin_unlock_irqrestore(&target_loads_lock, flags);
+       return ret;
+}
+
+/*
+ * If increasing frequencies never map to a lower target load then
+ * choose_freq() will find the minimum frequency that does not exceed its
+ * target load given the current load.
+ */
+
+static unsigned int choose_freq(
+       struct cpufreq_interactive_cpuinfo *pcpu, unsigned int loadadjfreq)
+{
+       unsigned int freq = pcpu->policy->cur;
+       unsigned int prevfreq, freqmin, freqmax;
+       unsigned int tl;
+       int index;
+
+       freqmin = 0;
+       freqmax = UINT_MAX;
+
+       do {
+               prevfreq = freq;
+               tl = freq_to_targetload(freq);
+
+               /*
+                * Find the lowest frequency where the computed load is less
+                * than or equal to the target load.
+                */
+
+               cpufreq_frequency_table_target(
+                       pcpu->policy, pcpu->freq_table, loadadjfreq / tl,
+                       CPUFREQ_RELATION_L, &index);
+               freq = pcpu->freq_table[index].frequency;
+
+               if (freq > prevfreq) {
+                       /* The previous frequency is too low. */
+                       freqmin = prevfreq;
+
+                       if (freq >= freqmax) {
+                               /*
+                                * Find the highest frequency that is less
+                                * than freqmax.
+                                */
+                               cpufreq_frequency_table_target(
+                                       pcpu->policy, pcpu->freq_table,
+                                       freqmax - 1, CPUFREQ_RELATION_H,
+                                       &index);
+                               freq = pcpu->freq_table[index].frequency;
+
+                               if (freq == freqmin) {
+                                       /*
+                                        * The first frequency below freqmax
+                                        * has already been found to be too
+                                        * low.  freqmax is the lowest speed
+                                        * we found that is fast enough.
+                                        */
+                                       freq = freqmax;
+                                       break;
+                               }
+                       }
+               } else if (freq < prevfreq) {
+                       /* The previous frequency is high enough. */
+                       freqmax = prevfreq;
+
+                       if (freq <= freqmin) {
+                               /*
+                                * Find the lowest frequency that is higher
+                                * than freqmin.
+                                */
+                               cpufreq_frequency_table_target(
+                                       pcpu->policy, pcpu->freq_table,
+                                       freqmin + 1, CPUFREQ_RELATION_L,
+                                       &index);
+                               freq = pcpu->freq_table[index].frequency;
+
+                               /*
+                                * If freqmax is the first frequency above
+                                * freqmin then we have already found that
+                                * this speed is fast enough.
+                                */
+                               if (freq == freqmax)
+                                       break;
+                       }
+               }
+
+               /* If the same frequency is chosen as last time, we're done. */
+       } while (freq != prevfreq);
+
+       return freq;
+}
+
+static u64 update_load(int cpu)
+{
+       struct cpufreq_interactive_cpuinfo *pcpu = &per_cpu(cpuinfo, cpu);
+       u64 now;
+       u64 now_idle;
+       unsigned int delta_idle;
+       unsigned int delta_time;
+       u64 active_time;
+
+       now_idle = get_cpu_idle_time_us(cpu, &now);
+       delta_idle = (unsigned int)(now_idle - pcpu->time_in_idle);
+       delta_time = (unsigned int)(now - pcpu->time_in_idle_timestamp);
+       active_time = delta_time - delta_idle;
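+       /*
+        * Accumulate busy usecs weighted by the frequency in effect;
+        * cpufreq_interactive_timer() later divides this sum by total
+        * elapsed time to recover a busy-fraction-weighted average speed.
+        */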
+       pcpu->cputime_speedadj += active_time * pcpu->policy->cur;
+
+       pcpu->time_in_idle = now_idle;
+       pcpu->time_in_idle_timestamp = now;
+       return now;
+}
+
+static void cpufreq_interactive_timer(unsigned long data)
+{
+       u64 now;
+       unsigned int delta_time;
+       u64 cputime_speedadj;
+       int cpu_load;
+       struct cpufreq_interactive_cpuinfo *pcpu =
+               &per_cpu(cpuinfo, data);
+       unsigned int new_freq;
+       unsigned int loadadjfreq;
+       unsigned int index;
+       unsigned long flags;
+       bool boosted;
+
+       if (!down_read_trylock(&pcpu->enable_sem))
+               return;
+       if (!pcpu->governor_enabled)
+               goto exit;
+
+       spin_lock_irqsave(&pcpu->load_lock, flags);
+       now = update_load(data);
+       delta_time = (unsigned int)(now - pcpu->cputime_speedadj_timestamp);
+       cputime_speedadj = pcpu->cputime_speedadj;
+       spin_unlock_irqrestore(&pcpu->load_lock, flags);
+
+       if (WARN_ON_ONCE(!delta_time))
+               goto rearm;
+
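+       /*
+        * cputime_speedadj / delta_time is the busy-fraction-weighted
+        * average frequency over this sample; multiplying by 100 and
+        * dividing by target_freq (below) turns it into a percentage
+        * load relative to the currently targeted speed.
+        */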
+       do_div(cputime_speedadj, delta_time);
+       loadadjfreq = (unsigned int)cputime_speedadj * 100;
+       cpu_load = loadadjfreq / pcpu->target_freq;
+       boosted = boost_val || now < boostpulse_endtime;
+
+       if (cpu_load >= go_hispeed_load || boosted) {
+               if (pcpu->target_freq < hispeed_freq) {
+                       new_freq = hispeed_freq;
+               } else {
+                       new_freq = choose_freq(pcpu, loadadjfreq);
+
+                       if (new_freq < hispeed_freq)
+                               new_freq = hispeed_freq;
+               }
+       } else {
+               new_freq = choose_freq(pcpu, loadadjfreq);
+       }
+
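+       /*
+        * Once at or above hispeed_freq, defer further increases until we
+        * have stayed there for above_hispeed_delay_val usecs.
+        */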
+       if (pcpu->target_freq >= hispeed_freq &&
+           new_freq > pcpu->target_freq &&
+           now - pcpu->hispeed_validate_time < above_hispeed_delay_val) {
+               trace_cpufreq_interactive_notyet(
+                       data, cpu_load, pcpu->target_freq,
+                       pcpu->policy->cur, new_freq);
+               goto rearm;
+       }
+
+       pcpu->hispeed_validate_time = now;
+
+       if (cpufreq_frequency_table_target(pcpu->policy, pcpu->freq_table,
+                                          new_freq, CPUFREQ_RELATION_L,
+                                          &index)) {
+               pr_warn_once("timer %d: cpufreq_frequency_table_target error\n",
+                            (int) data);
+               goto rearm;
+       }
+
+       new_freq = pcpu->freq_table[index].frequency;
+
+       /*
+        * Do not scale below floor_freq unless we have been at or above the
+        * floor frequency for the minimum sample time since last validated.
+        */
+       if (new_freq < pcpu->floor_freq) {
+               if (now - pcpu->floor_validate_time < min_sample_time) {
+                       trace_cpufreq_interactive_notyet(
+                               data, cpu_load, pcpu->target_freq,
+                               pcpu->policy->cur, new_freq);
+                       goto rearm;
+               }
+       }
+
+       /*
+        * Update the timestamp for checking whether speed has been held at
+        * or above the selected frequency for a minimum of min_sample_time,
+        * if not boosted to hispeed_freq.  If boosted to hispeed_freq then we
+        * allow the speed to drop as soon as the boostpulse duration expires
+        * (or the indefinite boost is turned off).
+        */
+
+       if (!boosted || new_freq > hispeed_freq) {
+               pcpu->floor_freq = new_freq;
+               pcpu->floor_validate_time = now;
+       }
+
+       if (pcpu->target_freq == new_freq) {
+               trace_cpufreq_interactive_already(
+                       data, cpu_load, pcpu->target_freq,
+                       pcpu->policy->cur, new_freq);
+               goto rearm_if_notmax;
+       }
+
+       trace_cpufreq_interactive_target(data, cpu_load, pcpu->target_freq,
+                                        pcpu->policy->cur, new_freq);
+
+       pcpu->target_freq = new_freq;
+       spin_lock_irqsave(&speedchange_cpumask_lock, flags);
+       cpumask_set_cpu(data, &speedchange_cpumask);
+       spin_unlock_irqrestore(&speedchange_cpumask_lock, flags);
+       wake_up_process(speedchange_task);
+
+rearm_if_notmax:
+       /*
+        * Already at max speed with no need to change it; wait until the
+        * next idle event to re-evaluate, so no timer is needed.
+        */
+       if (pcpu->target_freq == pcpu->policy->max)
+               goto exit;
+
+rearm:
+       if (!timer_pending(&pcpu->cpu_timer))
+               cpufreq_interactive_timer_resched(pcpu);
+
+exit:
+       up_read(&pcpu->enable_sem);
+       return;
+}
+
+static void cpufreq_interactive_idle_start(void)
+{
+       struct cpufreq_interactive_cpuinfo *pcpu =
+               &per_cpu(cpuinfo, smp_processor_id());
+       int pending;
+
+       if (!down_read_trylock(&pcpu->enable_sem))
+               return;
+       if (!pcpu->governor_enabled) {
+               up_read(&pcpu->enable_sem);
+               return;
+       }
+
+       pending = timer_pending(&pcpu->cpu_timer);
+
+       if (pcpu->target_freq != pcpu->policy->min) {
+               /*
+                * Entering idle while not at lowest speed.  On some
+                * platforms this can hold the other CPU(s) at that speed
+                * even though the CPU is idle. Set a timer to re-evaluate
+                * speed so this idle CPU doesn't hold the other CPUs above
+                * min indefinitely.  This should probably be a quirk of
+                * the CPUFreq driver.
+                */
+               if (!pending)
+                       cpufreq_interactive_timer_resched(pcpu);
+       }
+
+       up_read(&pcpu->enable_sem);
+}
+
+static void cpufreq_interactive_idle_end(void)
+{
+       struct cpufreq_interactive_cpuinfo *pcpu =
+               &per_cpu(cpuinfo, smp_processor_id());
+
+       if (!down_read_trylock(&pcpu->enable_sem))
+               return;
+       if (!pcpu->governor_enabled) {
+               up_read(&pcpu->enable_sem);
+               return;
+       }
+
+       /* Arm the timer for 1-2 ticks later if not already. */
+       if (!timer_pending(&pcpu->cpu_timer)) {
+               cpufreq_interactive_timer_resched(pcpu);
+       } else if (time_after_eq(jiffies, pcpu->cpu_timer.expires)) {
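+               /*
+                * The timer expired while this CPU was idle (the tick may
+                * have been stopped); run the evaluation immediately rather
+                * than waiting for the stale timer.
+                */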
+               del_timer(&pcpu->cpu_timer);
+               del_timer(&pcpu->cpu_slack_timer);
+               cpufreq_interactive_timer(smp_processor_id());
+       }
+
+       up_read(&pcpu->enable_sem);
+}
+
+static int cpufreq_interactive_speedchange_task(void *data)
+{
+       unsigned int cpu;
+       cpumask_t tmp_mask;
+       unsigned long flags;
+       struct cpufreq_interactive_cpuinfo *pcpu;
+
+       while (1) {
+               set_current_state(TASK_INTERRUPTIBLE);
+               spin_lock_irqsave(&speedchange_cpumask_lock, flags);
+
+               if (cpumask_empty(&speedchange_cpumask)) {
+                       spin_unlock_irqrestore(&speedchange_cpumask_lock,
+                                              flags);
+                       schedule();
+
+                       if (kthread_should_stop())
+                               break;
+
+                       spin_lock_irqsave(&speedchange_cpumask_lock, flags);
+               }
+
+               set_current_state(TASK_RUNNING);
+               tmp_mask = speedchange_cpumask;
+               cpumask_clear(&speedchange_cpumask);
+               spin_unlock_irqrestore(&speedchange_cpumask_lock, flags);
+
+               for_each_cpu(cpu, &tmp_mask) {
+                       unsigned int j;
+                       unsigned int max_freq = 0;
+
+                       pcpu = &per_cpu(cpuinfo, cpu);
+                       if (!down_read_trylock(&pcpu->enable_sem))
+                               continue;
+                       if (!pcpu->governor_enabled) {
+                               up_read(&pcpu->enable_sem);
+                               continue;
+                       }
+
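+                       /*
+                        * A policy may span several CPUs; drive it at the
+                        * highest target_freq requested by any CPU in it.
+                        */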
+                       for_each_cpu(j, pcpu->policy->cpus) {
+                               struct cpufreq_interactive_cpuinfo *pjcpu =
+                                       &per_cpu(cpuinfo, j);
+
+                               if (pjcpu->target_freq > max_freq)
+                                       max_freq = pjcpu->target_freq;
+                       }
+
+                       if (max_freq != pcpu->policy->cur)
+                               __cpufreq_driver_target(pcpu->policy,
+                                                       max_freq,
+                                                       CPUFREQ_RELATION_H);
+                       trace_cpufreq_interactive_setspeed(cpu,
+                                                    pcpu->target_freq,
+                                                    pcpu->policy->cur);
+
+                       up_read(&pcpu->enable_sem);
+               }
+       }
+
+       return 0;
+}
+
+static void cpufreq_interactive_boost(void)
+{
+       int i;
+       int anyboost = 0;
+       unsigned long flags;
+       struct cpufreq_interactive_cpuinfo *pcpu;
+
+       spin_lock_irqsave(&speedchange_cpumask_lock, flags);
+
+       for_each_online_cpu(i) {
+               pcpu = &per_cpu(cpuinfo, i);
+
+               if (pcpu->target_freq < hispeed_freq) {
+                       pcpu->target_freq = hispeed_freq;
+                       cpumask_set_cpu(i, &speedchange_cpumask);
+                       pcpu->hispeed_validate_time =
+                               ktime_to_us(ktime_get());
+                       anyboost = 1;
+               }
+
+               /*
+                * Refresh the floor frequency and its validation timestamp
+                * so the boost is held for at least min_sample_time.
+                */
+
+               pcpu->floor_freq = hispeed_freq;
+               pcpu->floor_validate_time = ktime_to_us(ktime_get());
+       }
+
+       spin_unlock_irqrestore(&speedchange_cpumask_lock, flags);
+
+       if (anyboost)
+               wake_up_process(speedchange_task);
+}
+
+static int cpufreq_interactive_notifier(
+       struct notifier_block *nb, unsigned long val, void *data)
+{
+       struct cpufreq_freqs *freq = data;
+       struct cpufreq_interactive_cpuinfo *pcpu;
+       int cpu;
+       unsigned long flags;
+
+       if (val == CPUFREQ_POSTCHANGE) {
+               pcpu = &per_cpu(cpuinfo, freq->cpu);
+               if (!down_read_trylock(&pcpu->enable_sem))
+                       return 0;
+               if (!pcpu->governor_enabled) {
+                       up_read(&pcpu->enable_sem);
+                       return 0;
+               }
+
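+               /*
+                * Snapshot each sibling CPU's load accounting at the
+                * frequency-change boundary so active time is attributed
+                * to the speed that was actually in effect.
+                */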
+               for_each_cpu(cpu, pcpu->policy->cpus) {
+                       struct cpufreq_interactive_cpuinfo *pjcpu =
+                               &per_cpu(cpuinfo, cpu);
+                       spin_lock_irqsave(&pjcpu->load_lock, flags);
+                       update_load(cpu);
+                       spin_unlock_irqrestore(&pjcpu->load_lock, flags);
+               }
+
+               up_read(&pcpu->enable_sem);
+       }
+       return 0;
+}
+
+static struct notifier_block cpufreq_notifier_block = {
+       .notifier_call = cpufreq_interactive_notifier,
+};
+
+static ssize_t show_target_loads(
+       struct kobject *kobj, struct attribute *attr, char *buf)
+{
+       int i;
+       ssize_t ret = 0;
+       unsigned long flags;
+
+       spin_lock_irqsave(&target_loads_lock, flags);
+
+       for (i = 0; i < ntarget_loads; i++)
+               ret += sprintf(buf + ret, "%u%s", target_loads[i],
+                              i & 0x1 ? ":" : " ");
+
+       ret += sprintf(buf + ret, "\n");
+       spin_unlock_irqrestore(&target_loads_lock, flags);
+       return ret;
+}
+
+static ssize_t store_target_loads(
+       struct kobject *kobj, struct attribute *attr, const char *buf,
+       size_t count)
+{
+       int ret;
+       const char *cp;
+       unsigned int *new_target_loads = NULL;
+       int ntokens = 1;
+       int i;
+       unsigned long flags;
+
+       cp = buf;
+       while ((cp = strpbrk(cp + 1, " :")))
+               ntokens++;
+
+       if (!(ntokens & 0x1))
+               goto err_inval;
+
+       new_target_loads = kmalloc(ntokens * sizeof(unsigned int), GFP_KERNEL);
+       if (!new_target_loads) {
+               ret = -ENOMEM;
+               goto err;
+       }
+
+       cp = buf;
+       i = 0;
+       while (i < ntokens) {
+               if (sscanf(cp, "%u", &new_target_loads[i++]) != 1)
+                       goto err_inval;
+
+               cp = strpbrk(cp, " :");
+               if (!cp)
+                       break;
+               cp++;
+       }
+
+       if (i != ntokens)
+               goto err_inval;
+
+       spin_lock_irqsave(&target_loads_lock, flags);
+       if (target_loads != default_target_loads)
+               kfree(target_loads);
+       target_loads = new_target_loads;
+       ntarget_loads = ntokens;
+       spin_unlock_irqrestore(&target_loads_lock, flags);
+       return count;
+
+err_inval:
+       ret = -EINVAL;
+err:
+       kfree(new_target_loads);
+       return ret;
+}
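+/*
+ * Example (per the governor's documented sysfs format): writing
+ * "85 1000000:90" yields tokens {85, 1000000, 90} (an odd count, as
+ * required above), setting an 85% target load below 1 GHz and 90% at
+ * or above it.
+ */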
+
+static struct global_attr target_loads_attr =
+       __ATTR(target_loads, S_IRUGO | S_IWUSR,
+               show_target_loads, store_target_loads);
+
+static ssize_t show_hispeed_freq(struct kobject *kobj,
+                                struct attribute *attr, char *buf)
+{
+       return sprintf(buf, "%u\n", hispeed_freq);
+}
+
+static ssize_t store_hispeed_freq(struct kobject *kobj,
+                                 struct attribute *attr, const char *buf,
+                                 size_t count)
+{
+       int ret;
+       unsigned long val;
+
+       ret = strict_strtoul(buf, 0, &val);
+       if (ret < 0)
+               return ret;
+       hispeed_freq = val;
+       return count;
+}
+
+static struct global_attr hispeed_freq_attr =
+       __ATTR(hispeed_freq, S_IRUGO | S_IWUSR,
+               show_hispeed_freq, store_hispeed_freq);
+
+static ssize_t show_go_hispeed_load(struct kobject *kobj,
+                                    struct attribute *attr, char *buf)
+{
+       return sprintf(buf, "%lu\n", go_hispeed_load);
+}
+
+static ssize_t store_go_hispeed_load(struct kobject *kobj,
+                       struct attribute *attr, const char *buf, size_t count)
+{
+       int ret;
+       unsigned long val;
+
+       ret = strict_strtoul(buf, 0, &val);